Diffstat (limited to 'drivers/net/ethernet/qlogic')
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 122
-rw-r--r--  drivers/net/ethernet/qlogic/Makefile | 10
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/Makefile | 11
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 1869
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 918
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 945
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h | 1063
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2548
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h | 269
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c | 1913
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3482
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 42
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 1007
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_chain.c | 371
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2571
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 367
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h | 1491
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2414
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 107
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 8712
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.h | 60
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 5520
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 529
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_devlink.c | 252
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_devlink.h | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 1045
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 61
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 10932
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 932
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 336
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 1932
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 657
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.h | 94
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 2423
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 459
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h | 500
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 1415
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 66
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 3262
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.h | 206
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 2920
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 454
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 2823
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 260
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 3215
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4244
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 1396
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h | 2475
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c | 1340
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c | 829
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h | 103
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c | 375
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h | 39
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.c | 466
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ooo.h | 172
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.c | 433
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ptp.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 2041
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.h | 210
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 1725
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c | 1143
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.h | 34
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.c | 185
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.h | 53
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 477
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 595
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 1051
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 5305
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h | 501
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 1718
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 1275
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 614
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 350
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 2363
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 2074
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c | 1805
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 2950
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 547
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.h | 38
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_rdma.c | 355
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.c | 3930
-rw-r--r--  drivers/net/ethernet/qlogic/qla3xxx.h | 1188
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/Makefile | 16
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2418
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 4241
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 666
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 2600
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c | 284
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 1435
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c | 1146
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 114
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 1890
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h | 947
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 1685
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 225
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c | 1302
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2227
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 4259
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 1455
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 277
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 2229
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 2046
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 1431
104 files changed, 147914 insertions, 0 deletions
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
new file mode 100644
index 000000000..120335323
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# QLogic network device configuration
+#
+
+config NET_VENDOR_QLOGIC
+ bool "QLogic devices"
+ default y
+ depends on PCI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about QLogic cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_QLOGIC
+
+config QLA3XXX
+ tristate "QLogic QLA3XXX Network Driver Support"
+ depends on PCI
+ help
+ This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called qla3xxx.
+
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ select FW_LOADER
+ help
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
+config QLCNIC_SRIOV
+ bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+ depends on QLCNIC && PCI_IOV
+ default y
+ help
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QLE83XX Converged Ethernet devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
+config QLCNIC_DCB
+ bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+ depends on QLCNIC && DCB
+ default y
+ help
+ This configuration parameter enables DCB support in QLE83XX
+ and QLE82XX Converged Ethernet devices. This allows DCB get
+ operations to be performed through the rtnetlink interface.
+ Only the CEE mode of DCB is supported. PG and PFC values are
+ related only to Tx.
+
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ help
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
+config NETXEN_NIC
+ tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
+ depends on PCI
+ select FW_LOADER
+ help
+ This enables support for NetXen's Gigabit Ethernet cards.
+
+config QED
+ tristate "QLogic QED 25/40/100Gb core driver"
+ depends on PCI
+ select ZLIB_INFLATE
+ select CRC8
+ select CRC32
+ select NET_DEVLINK
+ help
+ This enables support for the Marvell FastLinQ adapter family.
+
+config QED_LL2
+ bool
+
+config QED_SRIOV
+ bool "QLogic QED 25/40/100Gb SR-IOV support"
+ depends on QED && PCI_IOV
+ default y
+ help
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QED devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
+config QEDE
+ tristate "QLogic QED 25/40/100Gb Ethernet NIC"
+ depends on QED
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This enables the Ethernet driver for the Marvell FastLinQ
+ adapter family.
+
+config QED_RDMA
+ bool
+
+config QED_ISCSI
+ bool
+
+config QED_NVMETCP
+ bool
+
+config QED_FCOE
+ bool
+
+config QED_OOO
+ bool
+
+endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/Makefile b/drivers/net/ethernet/qlogic/Makefile
new file mode 100644
index 000000000..1ae4a0743
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the QLogic network device drivers.
+#
+
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
+obj-$(CONFIG_NETXEN_NIC) += netxen/
+obj-$(CONFIG_QED) += qed/
+obj-$(CONFIG_QEDE) += qede/
diff --git a/drivers/net/ethernet/qlogic/netxen/Makefile b/drivers/net/ethernet/qlogic/netxen/Makefile
new file mode 100644
index 000000000..d6e80b3eb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+# Copyright (C) 2003 - 2009 NetXen, Inc.
+# Copyright (C) 2009 - QLogic Corporation.
+# All rights reserved.
+#
+
+
+obj-$(CONFIG_NETXEN_NIC) := netxen_nic.o
+
+netxen_nic-y := netxen_nic_hw.o netxen_nic_main.o netxen_nic_init.o \
+ netxen_nic_ethtool.o netxen_nic_ctx.o
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
new file mode 100644
index 000000000..f13fa7396
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -0,0 +1,1869 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#ifndef _NETXEN_NIC_H_
+#define _NETXEN_NIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+
+#include "netxen_nic_hdr.h"
+#include "netxen_nic_hw.h"
+
+#define _NETXEN_NIC_LINUX_MAJOR 4
+#define _NETXEN_NIC_LINUX_MINOR 0
+#define _NETXEN_NIC_LINUX_SUBVERSION 82
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.82"
+
+#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define NETXEN_DECODE_VERSION(v) \
+ NETXEN_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
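+
+/*
+ * Editorial illustration (not part of the original header): with the
+ * encoding above, a flash version word of 0x00520004 decodes as major 4,
+ * minor 0, build 0x52, so NETXEN_DECODE_VERSION(0x00520004) equals
+ * NETXEN_VERSION_CODE(4, 0, 82), i.e. firmware "4.0.82".
+ */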
+
+#define NETXEN_NUM_FLASH_SECTORS (64)
+#define NETXEN_FLASH_SECTOR_SIZE (64 * 1024)
+#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \
+ * NETXEN_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct netxen_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct netxen_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define find_diff_among(a,b,range) ((a)<(b)?((b)-(a)):((b)+(range)-(a)))
+
+#define NETXEN_RCV_PRODUCER_OFFSET 0
+#define NETXEN_RCV_PEG_DB_ID 2
+#define NETXEN_HOST_DUMMY_DMA_SIZE 1024
+#define FLASH_SUCCESS 0
+
+#define ADDR_IN_WINDOW1(off) \
+ ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+/*
+ * normalize a 64MB crb address to 32MB PCI window
+ * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
+ */
+#define NETXEN_CRB_NORMAL(reg) \
+ ((reg) - NETXEN_CRB_PCIX_HOST2 + NETXEN_CRB_PCIX_HOST)
+
+#define NETXEN_CRB_NORMALIZE(adapter, reg) \
+ pci_base_offset(adapter, NETXEN_CRB_NORMAL(reg))
+
+#define DB_NORMALIZE(adapter, off) \
+ (adapter->ahw.db_base + (off))
+
+#define NX_P2_C0 0x24
+#define NX_P2_C1 0x25
+#define NX_P3_A0 0x30
+#define NX_P3_A2 0x30
+#define NX_P3_B0 0x40
+#define NX_P3_B1 0x41
+#define NX_P3_B2 0x42
+#define NX_P3P_A0 0x50
+
+#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
+#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
+#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define SECOND_PAGE_GROUP_START 0x6000000
+#define SECOND_PAGE_GROUP_END 0x68BC000
+
+#define THIRD_PAGE_GROUP_START 0x70E4000
+#define THIRD_PAGE_GROUP_END 0x8000000
+
+#define FIRST_PAGE_GROUP_SIZE FIRST_PAGE_GROUP_END - FIRST_PAGE_GROUP_START
+#define SECOND_PAGE_GROUP_SIZE SECOND_PAGE_GROUP_END - SECOND_PAGE_GROUP_START
+#define THIRD_PAGE_GROUP_SIZE THIRD_PAGE_GROUP_END - THIRD_PAGE_GROUP_START
+
+#define P2_MAX_MTU (8000)
+#define P3_MAX_MTU (9600)
+#define NX_ETHERMTU 1500
+#define NX_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define NX_P2_RX_BUF_MAX_LEN 1760
+#define NX_P3_RX_BUF_MAX_LEN (NX_MAX_ETHERHDR + NX_ETHERMTU)
+#define NX_P2_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P2_MAX_MTU)
+#define NX_P3_RX_JUMBO_BUF_MAX_LEN (NX_MAX_ETHERHDR + P3_MAX_MTU)
+#define NX_CT_DEFAULT_RX_BUF_LEN 2048
+#define NX_LRO_BUFFER_EXTRA 2048
+
+#define NX_RX_LRO_BUFFER_LENGTH (8060)
+
+/*
+ * Maximum number of ring contexts
+ */
+#define MAX_RING_CTX 1
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* The following opcodes are for internal consumption. */
+#define NETXEN_CONTROL_OP 0x10
+#define PEGNET_REQUEST 0x11
+
+#define MAX_NUM_CARDS 4
+
+#define NETXEN_MAX_FRAGS_PER_TX 14
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
+#define NX_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_START 0xff00
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
+#define NETXEN_CTX_SIGNATURE 0xdee0
+#define NETXEN_CTX_SIGNATURE_V2 0x0002dee0
+#define NETXEN_CTX_RESET 0xbad0
+#define NETXEN_CTX_D3_RESET 0xacc0
+#define NETXEN_RCV_PRODUCER(ringid) (ringid)
+
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+#define PHAN_PEG_RCV_START_INITIALIZE 0xff00
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
+
+#define get_index_range(index,length,count) \
+ (((index) + (count)) & ((length) - 1))
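+
+/*
+ * Editorial note (illustration, not part of the original header): both
+ * helpers assume the ring length is a power of two, so the mask simply
+ * wraps the index; e.g. with a 1024-entry ring,
+ * get_next_index(1023, 1024) == 0 and get_index_range(1020, 1024, 8) == 4.
+ */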
+
+#define MPORT_SINGLE_FUNCTION_MODE 0x1111
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+#define NX_MAX_PCI_FUNC 8
+
+/*
+ * NetXen host-peg signal message structure
+ *
+ * Bit 0-1 : peg_id => 0x2 for tx and 0x1 for rx
+ * Bit 2 : priv_id => must be 1
+ * Bit 3-17 : count => for doorbell
+ * Bit 18-27 : ctx_id => Context id
+ * Bit 28-31 : opcode
+ */
+
+typedef u32 netxen_ctx_msg;
+
+#define netxen_set_msg_peg_id(config_word, val) \
+ ((config_word) &= ~3, (config_word) |= val & 3)
+#define netxen_set_msg_privid(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_set_msg_count(config_word, val) \
+ ((config_word) &= ~(0x7fff<<3), (config_word) |= (val & 0x7fff) << 3)
+#define netxen_set_msg_ctxid(config_word, val) \
+ ((config_word) &= ~(0x3ff<<18), (config_word) |= (val & 0x3ff) << 18)
+#define netxen_set_msg_opcode(config_word, val) \
+ ((config_word) &= ~(0xf<<28), (config_word) |= (val & 0xf) << 28)
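+
+/*
+ * Illustrative usage (editorial sketch, not part of the original header;
+ * peg_id, count, ctx_id and opcode below are placeholders):
+ *
+ *	netxen_ctx_msg msg = 0;
+ *
+ *	netxen_set_msg_peg_id(msg, peg_id);
+ *	netxen_set_msg_privid(msg);
+ *	netxen_set_msg_count(msg, count);
+ *	netxen_set_msg_ctxid(msg, ctx_id);
+ *	netxen_set_msg_opcode(msg, opcode);
+ */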
+
+struct netxen_rcv_ring {
+ __le64 addr;
+ __le32 size;
+ __le32 rsrvd;
+};
+
+struct netxen_sts_ring {
+ __le64 addr;
+ __le32 size;
+ __le16 msi_index;
+ __le16 rsvd;
+} ;
+
+struct netxen_ring_ctx {
+
+ /* one command ring */
+ __le64 cmd_consumer_offset;
+ __le64 cmd_ring_addr;
+ __le32 cmd_ring_size;
+ __le32 rsrvd;
+
+ /* three receive rings */
+ struct netxen_rcv_ring rcv_rings[NUM_RCV_DESC_RINGS];
+
+ __le64 sts_ring_addr;
+ __le32 sts_ring_size;
+
+ __le32 ctx_id;
+
+ __le64 rsrvd_2[3];
+ __le32 sts_ring_count;
+ __le32 rsrvd_3;
+ struct netxen_sts_ring sts_rings[NUM_STS_DESC_RINGS];
+
+} __attribute__ ((aligned(64)));
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
+ * them only when doing LSO (packets larger than the standard 1500-byte size).
+ */
+
+/*
+ * The size of the reference handle has been changed to 16 bits to pass the
+ * MSS field for the LSO packet.
+ */
+
+#define FLAGS_CHECKSUM_ENABLED 0x01
+#define FLAGS_LSO_ENABLED 0x02
+#define FLAGS_IPSEC_SA_ADD 0x04
+#define FLAGS_IPSEC_SA_DELETE 0x08
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define netxen_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+
+#define netxen_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define netxen_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define netxen_set_tx_port(_desc, _port) \
+ (_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0)
+
+#define netxen_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ (_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7))
+
+#define netxen_set_tx_frags_len(_desc, _frags, _len) \
+ (_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8))
+
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define NETXEN_NIC_SYN_OFFLOAD 0x03
+#define NETXEN_NIC_RXPKT_DESC 0x04
+#define NETXEN_OLD_RXPKT_DESC 0x3f
+#define NETXEN_NIC_RESPONSE_DESC 0x05
+#define NETXEN_NIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_NEED_CKSUM (1)
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define netxen_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define netxen_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define netxen_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define netxen_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define netxen_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define netxen_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define netxen_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define netxen_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define netxen_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
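+
+/*
+ * Illustrative decode (editorial sketch, not part of the original header):
+ * given a 64-bit status descriptor word sts_data, a receive handler would
+ * typically extract
+ *
+ *	opcode = netxen_get_sts_opcode(sts_data);
+ *	length = netxen_get_sts_totallength(sts_data);
+ *	index = netxen_get_sts_refhandle(sts_data);
+ *	csummed = (netxen_get_sts_status(sts_data) == STATUS_CKSUM_OK);
+ */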
+
+#define netxen_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define netxen_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define netxen_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define netxen_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define netxen_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define netxen_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define netxen_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define netxen_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+#define netxen_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
+
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE *************************/
+#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define NX_UNI_DIR_SECT_BOOTLD 0x6
+#define NX_UNI_DIR_SECT_FW 0x7
+
+/*Offsets */
+#define NX_UNI_CHIP_REV_OFF 10
+#define NX_UNI_FLAGS_OFF 11
+#define NX_UNI_BIOS_VERSION_OFF 12
+#define NX_UNI_BOOTLD_IDX_OFF 27
+#define NX_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc{
+ uint32_t findex;
+ uint32_t num_entries;
+ uint32_t entry_size;
+ uint32_t reserved[5];
+};
+
+struct uni_data_desc{
+ uint32_t findex;
+ uint32_t size;
+ uint32_t reserved[5];
+};
+
+/* UNIFIED ROMIMAGE *************************/
+
+/* The version of the main data structure */
+#define NETXEN_BDINFO_VERSION 1
+
+/* Magic number to let user know flash is programmed */
+#define NETXEN_BDINFO_MAGIC 0x12345678
+
+/* Max number of Gig ports on a Phantom board */
+#define NETXEN_MAX_PORTS 4
+
+#define NETXEN_BRDTYPE_P1_BD 0x0000
+#define NETXEN_BRDTYPE_P1_SB 0x0001
+#define NETXEN_BRDTYPE_P1_SMAX 0x0002
+#define NETXEN_BRDTYPE_P1_SOCK 0x0003
+
+#define NETXEN_BRDTYPE_P2_SOCK_31 0x0008
+#define NETXEN_BRDTYPE_P2_SOCK_35 0x0009
+#define NETXEN_BRDTYPE_P2_SB35_4G 0x000a
+#define NETXEN_BRDTYPE_P2_SB31_10G 0x000b
+#define NETXEN_BRDTYPE_P2_SB31_2G 0x000c
+
+#define NETXEN_BRDTYPE_P2_SB31_10G_IMEZ 0x000d
+#define NETXEN_BRDTYPE_P2_SB31_10G_HMEZ 0x000e
+#define NETXEN_BRDTYPE_P2_SB31_10G_CX4 0x000f
+
+#define NETXEN_BRDTYPE_P3_REF_QG 0x0021
+#define NETXEN_BRDTYPE_P3_HMEZ 0x0022
+#define NETXEN_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define NETXEN_BRDTYPE_P3_4_GB 0x0024
+#define NETXEN_BRDTYPE_P3_IMEZ 0x0025
+#define NETXEN_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define NETXEN_BRDTYPE_P3_10000_BASE_T 0x0027
+#define NETXEN_BRDTYPE_P3_XG_LOM 0x0028
+#define NETXEN_BRDTYPE_P3_4_GB_MM 0x0029
+#define NETXEN_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define NETXEN_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define NETXEN_BRDTYPE_P3_10G_CX4 0x0031
+#define NETXEN_BRDTYPE_P3_10G_XFP 0x0032
+#define NETXEN_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define NETXEN_CRBINIT_START 0 /* crbinit section */
+#define NETXEN_BRDCFG_START 0x4000 /* board config */
+#define NETXEN_INITCODE_START 0x6000 /* pegtune code */
+#define NETXEN_BOOTLD_START 0x10000 /* bootld */
+#define NETXEN_IMAGE_START 0x43000 /* compressed image */
+#define NETXEN_SECONDARY_START 0x200000 /* backup images */
+#define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */
+#define NETXEN_USER_START 0x3E8000 /* Firmware info */
+#define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */
+#define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */
+
+#define NX_OLD_MAC_ADDR_OFFSET (NETXEN_USER_START)
+#define NX_FW_VERSION_OFFSET (NETXEN_USER_START+0x408)
+#define NX_FW_SIZE_OFFSET (NETXEN_USER_START+0x40c)
+#define NX_FW_MAC_ADDR_OFFSET (NETXEN_USER_START+0x418)
+#define NX_FW_SERIAL_NUM_OFFSET (NETXEN_USER_START+0x81c)
+#define NX_BIOS_VERSION_OFFSET (NETXEN_USER_START+0x83c)
+
+#define NX_HDR_VERSION_OFFSET (NETXEN_BRDCFG_START)
+#define NX_BRDTYPE_OFFSET (NETXEN_BRDCFG_START+0x8)
+#define NX_FW_MAGIC_OFFSET (NETXEN_BRDCFG_START+0x128)
+
+#define NX_FW_MIN_SIZE (0x3fffff)
+#define NX_P2_MN_ROMIMAGE 0
+#define NX_P3_CT_ROMIMAGE 1
+#define NX_P3_MN_ROMIMAGE 2
+#define NX_UNIFIED_ROMIMAGE 3
+#define NX_FLASH_ROMIMAGE 4
+#define NX_UNKNOWN_ROMIMAGE 0xff
+
+#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
+#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
+#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
+#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define NX_FLASH_ROMIMAGE_NAME "flash"
+
+extern char netxen_nic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * netxen_skb_frag{} contains the mapping info for each SG list entry. This
+ * has to be freed when DMA is complete. This is part of netxen_tx_buffer{}.
+ */
+struct netxen_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct netxen_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define NETXEN_BUFFER_FREE 0
+#define NETXEN_BUFFER_BUSY 1
+
+/*
+ * There will be one netxen_buffer per skb packet. These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct netxen_cmd_buffer {
+ struct sk_buff *skb;
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct netxen_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define NETXEN_NIC_GBE 0x01
+#define NETXEN_NIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter;
+ * contains interrupt info as well as shared hardware info.
+ */
+struct netxen_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *pci_base1;
+ void __iomem *pci_base2;
+ void __iomem *db_base;
+ void __iomem *ocm_win_crb;
+
+ unsigned long db_len;
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ spinlock_t mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+
+struct netxen_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+/*
+ * Rcv Descriptor ring context. One such context per Rcv Descriptor ring.
+ * There may be one ring for normal packets, one for jumbo frames and
+ * possibly others.
+ */
+struct nx_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct netxen_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct nx_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct netxen_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct nx_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ void __iomem *crb_cmd_consumer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to the
+ * receive path must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct netxen_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct nx_host_rds_ring *rds_rings;
+ struct nx_host_sds_ring *sds_rings;
+
+ struct netxen_ring_ctx *hwctx;
+ dma_addr_t phys_addr;
+};
+
+struct _cdrp_cmd {
+ u32 cmd;
+ u32 arg1;
+ u32 arg2;
+ u32 arg3;
+};
+
+struct netxen_cmd_args {
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+};
+
+/* New HW context creation */
+
+#define NX_OS_CRB_RETRY_COUNT 4000
+#define NX_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define NX_CDRP_CLEAR 0x00000000
+#define NX_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the NX_CDRP_CMD_BIT cleared
+ * in the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_RSP(rsp) (rsp)
+#define NX_CDRP_IS_RSP(rsp) (((rsp) & NX_CDRP_CMD_BIT) == 0)
+
+#define NX_CDRP_RSP_OK 0x00000001
+#define NX_CDRP_RSP_FAIL 0x00000002
+#define NX_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the NX_CDRP_CMD_BIT set in
+ * the crb NX_CDRP_CRB_OFFSET.
+ */
+#define NX_CDRP_FORM_CMD(cmd) (NX_CDRP_CMD_BIT | (cmd))
+#define NX_CDRP_IS_CMD(cmd) (((cmd) & NX_CDRP_CMD_BIT) != 0)
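+
+/*
+ * Editorial illustration (not part of the original header): a CDRP exchange
+ * typically has the host write NX_CDRP_FORM_CMD() of one of the
+ * NX_CDRP_CMD_* opcodes below and then poll the same CRB until
+ * NX_CDRP_IS_RSP() holds for the value read back, at which point the word
+ * carries a response code such as NX_CDRP_RSP_OK.
+ */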
+
+#define NX_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define NX_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define NX_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define NX_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define NX_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define NX_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define NX_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define NX_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define NX_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define NX_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define NX_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define NX_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define NX_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define NX_CDRP_CMD_SET_MTU 0x00000012
+#define NX_CDRP_CMD_READ_PHY 0x00000013
+#define NX_CDRP_CMD_WRITE_PHY 0x00000014
+#define NX_CDRP_CMD_READ_HW_REG 0x00000015
+#define NX_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define NX_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define NX_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define NX_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define NX_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define NX_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define NX_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define NX_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define NX_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define NX_CDRP_CMD_CONFIG_GBE_PORT 0x0000001f
+#define NX_CDRP_CMD_MAX 0x00000020
+
+#define NX_RCODE_SUCCESS 0
+#define NX_RCODE_NO_HOST_MEM 1
+#define NX_RCODE_NO_HOST_RESOURCE 2
+#define NX_RCODE_NO_CARD_CRB 3
+#define NX_RCODE_NO_CARD_MEM 4
+#define NX_RCODE_NO_CARD_RESOURCE 5
+#define NX_RCODE_INVALID_ARGS 6
+#define NX_RCODE_INVALID_ACTION 7
+#define NX_RCODE_INVALID_STATE 8
+#define NX_RCODE_NOT_SUPPORTED 9
+#define NX_RCODE_NOT_PERMITTED 10
+#define NX_RCODE_NOT_READY 11
+#define NX_RCODE_DOES_NOT_EXIST 12
+#define NX_RCODE_ALREADY_EXISTS 13
+#define NX_RCODE_BAD_SIGNATURE 14
+#define NX_RCODE_CMD_NOT_IMPL 15
+#define NX_RCODE_CMD_INVALID 16
+#define NX_RCODE_TIMEOUT 17
+#define NX_RCODE_CMD_FAILED 18
+#define NX_RCODE_MAX_EXCEEDED 19
+#define NX_RCODE_MAX 20
+
+#define NX_DESTROY_CTX_RESET 0
+#define NX_DESTROY_CTX_D3_RESET 1
+#define NX_DESTROY_CTX_MAX 2
+
+/*
+ * Capabilities
+ */
+#define NX_CAP_BIT(class, bit) (1 << bit)
+#define NX_CAP0_LEGACY_CONTEXT NX_CAP_BIT(0, 0)
+#define NX_CAP0_MULTI_CONTEXT NX_CAP_BIT(0, 1)
+#define NX_CAP0_LEGACY_MN NX_CAP_BIT(0, 2)
+#define NX_CAP0_LEGACY_MS NX_CAP_BIT(0, 3)
+#define NX_CAP0_CUT_THROUGH NX_CAP_BIT(0, 4)
+#define NX_CAP0_LRO NX_CAP_BIT(0, 5)
+#define NX_CAP0_LSO NX_CAP_BIT(0, 6)
+#define NX_CAP0_JUMBO_CONTIGUOUS NX_CAP_BIT(0, 7)
+#define NX_CAP0_LRO_CONTIGUOUS NX_CAP_BIT(0, 8)
+#define NX_CAP0_HW_LRO NX_CAP_BIT(0, 10)
+#define NX_CAP0_HW_LRO_MSS NX_CAP_BIT(0, 21)
+
+/*
+ * Context state
+ */
+#define NX_HOST_CTX_STATE_FREED 0
+#define NX_HOST_CTX_STATE_ALLOCATED 1
+#define NX_HOST_CTX_STATE_ACTIVE 2
+#define NX_HOST_CTX_STATE_DISABLED 3
+#define NX_HOST_CTX_STATE_QUIESCED 4
+#define NX_HOST_CTX_STATE_MAX 5
+
+/*
+ * Rx context
+ */
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+} nx_hostrq_sds_ring_t;
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+} nx_hostrq_rds_ring_t;
+
+typedef struct {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+} nx_hostrq_rx_ctx_t;
+
+typedef struct {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+} nx_cardrsp_rds_ring_t;
+
+typedef struct {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_sds_ring_t;
+
+typedef struct {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrsp_sds_rings */
+ char data[0];
+} nx_cardrsp_rx_ctx_t;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(nx_hostrq_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_hostrq_sds_ring_t)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(nx_cardrsp_rds_ring_t)) + \
+ (sds_rings)*(sizeof(nx_cardrsp_sds_ring_t)))
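+
+/*
+ * Editorial illustration (not part of the original header): a request for
+ * 2 RDS rings and 1 SDS ring occupies SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, 2, 1)
+ * bytes, i.e. the fixed header plus two nx_hostrq_rds_ring_t and one
+ * nx_hostrq_sds_ring_t packed into data[] at rds_ring_offset/sds_ring_offset.
+ */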
+
+/*
+ * Tx context
+ */
+
+typedef struct {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+} nx_hostrq_cds_ring_t;
+
+typedef struct {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ nx_hostrq_cds_ring_t cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} nx_hostrq_tx_ctx_t;
+
+typedef struct {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} nx_cardrsp_cds_ring_t;
+
+typedef struct {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ nx_cardrsp_cds_ring_t cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} nx_cardrsp_tx_ctx_t;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define NX_HOST_RDS_CRB_MODE_UNIQUE 0
+#define NX_HOST_RDS_CRB_MODE_SHARED 1
+#define NX_HOST_RDS_CRB_MODE_CUSTOM 2
+#define NX_HOST_RDS_CRB_MODE_MAX 3
+
+#define NX_HOST_INT_CRB_MODE_UNIQUE 0
+#define NX_HOST_INT_CRB_MODE_SHARED 1
+#define NX_HOST_INT_CRB_MODE_NORX 2
+#define NX_HOST_INT_CRB_MODE_NOTX 3
+#define NX_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P2 16
+#define MC_COUNT_P3 38
+
+#define NETXEN_MAC_NOOP 0
+#define NETXEN_MAC_ADD 1
+#define NETXEN_MAC_DEL 2
+
+typedef struct nx_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+} nx_mac_list_t;
+
+struct nx_ip_list {
+ struct list_head list;
+ __be32 ip_addr;
+ bool master;
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and
+ * are adjusted based on the configured MTU.
+ */
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define NETXEN_NIC_INTR_DEFAULT 0x04
+
+typedef union {
+ struct {
+ uint16_t rx_packets;
+ uint16_t rx_time_us;
+ uint16_t tx_packets;
+ uint16_t tx_time_us;
+ } data;
+ uint64_t word;
+} nx_nic_intr_coalesce_data_t;
+
+typedef struct {
+ uint16_t stats_time_us;
+ uint16_t rate_sample_time;
+ uint16_t flags;
+ uint16_t rsvd_1;
+ uint32_t low_threshold;
+ uint32_t high_threshold;
+ nx_nic_intr_coalesce_data_t normal;
+ nx_nic_intr_coalesce_data_t low;
+ nx_nic_intr_coalesce_data_t high;
+ nx_nic_intr_coalesce_data_t irq;
+} nx_nic_intr_coalesce_t;
+
+#define NX_HOST_REQUEST 0x13
+#define NX_NIC_REQUEST 0x14
+
+#define NX_MAC_EVENT 0x1
+
+#define NX_IP_UP 2
+#define NX_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define NX_NIC_H2C_OPCODE_START 0
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS 1
+#define NX_NIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define NX_NIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define NX_NIC_H2C_OPCODE_CONFIG_LED 4
+#define NX_NIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define NX_NIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define NX_NIC_H2C_OPCODE_LRO_REQUEST 7
+#define NX_NIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define NX_NIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define NX_NIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define NX_NIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define NX_NIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define NX_NIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define NX_NIC_H2C_OPCODE_GET_NET_STATS 16
+#define NX_NIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define NX_NIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define NX_NIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define NX_NIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define NX_NIC_H2C_OPCODE_GET_LINKEVENT 21
+#define NX_NIC_C2C_OPCODE 22
+#define NX_NIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define NX_NIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define NX_NIC_H2C_OPCODE_LAST 25
+
+/*
+ * Firmware --> Driver
+ */
+
+#define NX_NIC_C2H_OPCODE_START 128
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define NX_NIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define NX_NIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define NX_NIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define NX_NIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define NX_NIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define NX_NIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define NX_NIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define NX_NIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define NX_NIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define NX_NIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define NX_NIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define NX_NIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define NX_NIC_LRO_REQUEST_FIRST 0
+#define NX_NIC_LRO_REQUEST_ADD_FLOW 1
+#define NX_NIC_LRO_REQUEST_DELETE_FLOW 2
+#define NX_NIC_LRO_REQUEST_TIMER 3
+#define NX_NIC_LRO_REQUEST_CLEANUP 4
+#define NX_NIC_LRO_REQUEST_ADD_FLOW_SCHEDULED 5
+#define NX_TOE_LRO_REQUEST_ADD_FLOW 6
+#define NX_TOE_LRO_REQUEST_ADD_FLOW_RESPONSE 7
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW 8
+#define NX_TOE_LRO_REQUEST_DELETE_FLOW_RESPONSE 9
+#define NX_TOE_LRO_REQUEST_TIMER 10
+#define NX_NIC_LRO_REQUEST_LAST 11
+
+#define NX_FW_CAPABILITY_LINK_NOTIFICATION (1 << 5)
+#define NX_FW_CAPABILITY_SWITCHING (1 << 6)
+#define NX_FW_CAPABILITY_PEXQ (1 << 7)
+#define NX_FW_CAPABILITY_BDG (1 << 8)
+#define NX_FW_CAPABILITY_FVLANTX (1 << 9)
+#define NX_FW_CAPABILITY_HW_LRO (1 << 10)
+#define NX_FW_CAPABILITY_GBE_LINK_CFG (1 << 11)
+#define NX_FW_CAPABILITY_MORE_CAPS (1 << 31)
+#define NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG (1 << 2)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0xEF10AF12
+#define AUTO_FW_RESET_DISABLED 0xDCBAAF12
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define netxen_get_nic_msgtype(msg_hdr) \
+ ((msg_hdr >> 58) & 0x3F)
+#define netxen_get_nic_msg_compid(msg_hdr) \
+ ((msg_hdr >> 40) & 0xFF)
+#define netxen_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
+#define netxen_get_nic_msg_errcode(msg_hdr) \
+ ((msg_hdr >> 16) & 0xFFFF)
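+
+/*
+ * Illustrative decode (editorial sketch, not part of the original header):
+ * for a response header msg_hdr taken from a firmware message (nx_fw_msg_t,
+ * defined below), a handler would typically read
+ *
+ *	opcode = netxen_get_nic_msg_opcode(msg_hdr);
+ *	errcode = netxen_get_nic_msg_errcode(msg_hdr);
+ *
+ * and match opcode against the NX_NIC_C2H_OPCODE_* values above.
+ */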
+
+typedef struct {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+} nx_fw_msg_t;
+
+typedef struct {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+} nx_nic_req_t;
+
+typedef struct {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+} nx_mac_req_t;
+
+#define MAX_PENDING_DESC_BLOCK_SIZE 64
+
+#define NETXEN_NIC_MSI_ENABLED 0x02
+#define NETXEN_NIC_MSIX_ENABLED 0x04
+#define NETXEN_NIC_LRO_ENABLED 0x08
+#define NETXEN_NIC_LRO_DISABLED 0x00
+#define NETXEN_NIC_BRIDGE_ENABLED 0x10
+#define NETXEN_NIC_DIAG_ENABLED 0x20
+#define NETXEN_FW_RESET_OWNER 0x40
+#define NETXEN_FW_MSS_CAP 0x80
+#define NETXEN_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define NETXEN_MSIX_TBL_SPACE 8192
+#define NETXEN_PCI_REG_MSIX_TBL 0x44
+
+#define NETXEN_DB_MAPSIZE_BYTES 0x1000
+
+#define NETXEN_ADAPTER_UP_MAGIC 777
+#define NETXEN_NIC_PEG_TUNE 0
+
+#define __NX_FW_ATTACHED 0
+#define __NX_DEV_UP 1
+#define __NX_RESETTING 2
+
+/* Mini Coredump FW supported version */
+#define NX_MD_SUPPORT_MAJOR 4
+#define NX_MD_SUPPORT_MINOR 0
+#define NX_MD_SUPPORT_SUBVERSION 579
+
+#define LSW(x) ((uint16_t)(x))
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
+
+/* Mini Coredump mask level */
+#define NX_DUMP_MASK_MIN 0x03
+#define NX_DUMP_MASK_DEF 0x1f
+#define NX_DUMP_MASK_MAX 0xff
+
+/* Mini Coredump CDRP commands */
+#define NX_CDRP_CMD_TEMP_SIZE 0x0000002f
+#define NX_CDRP_CMD_GET_TEMP_HDR 0x00000030
+
+
+#define NX_DUMP_STATE_ARRAY_LEN 16
+#define NX_DUMP_CAP_SIZE_ARRAY_LEN 8
+
+/* Mini Coredump sysfs entries flags*/
+#define NX_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define NX_ENABLE_FW_DUMP 0xaddfeed
+#define NX_DISABLE_FW_DUMP 0xbadfeed
+#define NX_FORCE_FW_RESET 0xdeaddead
+
+
+/* Flash read/write address */
+#define NX_FW_DUMP_REG1 0x00130060
+#define NX_FW_DUMP_REG2 0x001e0000
+#define NX_FLASH_SEM2_LK 0x0013C010
+#define NX_FLASH_SEM2_ULK 0x0013C014
+#define NX_FLASH_LOCK_ID 0x001B2100
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+/* Mini Coredump register read/write routine */
+#define NX_RD_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
+ NX_FW_DUMP_REG1)); \
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
+ *data = readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + \
+ LSW(addr))); \
+} while (0)
+
+#define NX_WR_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void __iomem *) (bar0 + \
+ NX_FW_DUMP_REG1)); \
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG1)); \
+ writel(data, (void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr)));\
+ readl((void __iomem *) (bar0 + NX_FW_DUMP_REG2 + LSW(addr))); \
+} while (0)
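+
+/*
+ * Editorial note (illustrative, not part of the original header): the upper
+ * 16 bits of addr select the window written through NX_FW_DUMP_REG1 and the
+ * low 16 bits index into the NX_FW_DUMP_REG2 aperture, e.g.
+ *
+ *	u32 val;
+ *
+ *	NX_RD_DUMP_REG(addr, bar0, &val);
+ */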
+
+
+/*
+Entry Type Defines
+*/
+
+#define RDNOP 0
+#define RDCRB 1
+#define RDMUX 2
+#define QUEUE 3
+#define BOARD 4
+#define RDSRE 5
+#define RDOCM 6
+#define PREGS 7
+#define L1DTG 8
+#define L1ITG 9
+#define CACHE 10
+
+#define L1DAT 11
+#define L1INS 12
+#define RDSTK 13
+#define RDCON 14
+
+#define L2DTG 21
+#define L2ITG 22
+#define L2DAT 23
+#define L2INS 24
+#define RDOC3 25
+
+#define MEMBK 32
+
+#define RDROM 71
+#define RDMEM 72
+#define RDMN 73
+
+#define INFOR 81
+#define CNTRL 98
+
+#define TLHDR 99
+#define RDEND 255
+
+#define PRIMQ 103
+#define SQG2Q 104
+#define SQG3Q 105
+
+/*
+* Opcodes for Control Entries.
+* These Flags are bit fields.
+*/
+#define NX_DUMP_WCRB 0x01
+#define NX_DUMP_RWCRB 0x02
+#define NX_DUMP_ANDCRB 0x04
+#define NX_DUMP_ORCRB 0x08
+#define NX_DUMP_POLLCRB 0x10
+#define NX_DUMP_RD_SAVE 0x20
+#define NX_DUMP_WRT_SAVED 0x40
+#define NX_DUMP_MOD_SAVE_ST 0x80
+
+/* Driver Flags */
+#define NX_DUMP_SKIP 0x80 /* driver skipped this entry */
+#define NX_DUMP_SIZE_ERR 0x40 /* entry size vs capture size mismatch */
+
+#define NX_PCI_READ_32(ADDR) readl((ADDR))
+#define NX_PCI_WRITE_32(DATA, ADDR) writel(DATA, (ADDR))
+
+
+
+struct netxen_minidump {
+ u32 pos; /* position in the dump buffer */
+ u8 fw_supports_md; /* FW supports Mini Coredump */
+ u8 has_valid_dump; /* indicates valid dump */
+ u8 md_capture_mask; /* driver capture mask */
+ u8 md_enabled; /* Turn Mini Coredump on/off */
+ u32 md_dump_size; /* Total FW Mini Coredump size */
+ u32 md_capture_size; /* FW dump capture size */
+ u32 md_template_size; /* FW template size */
+ u32 md_template_ver; /* FW template version */
+ u64 md_timestamp; /* FW Mini dump timestamp */
+ void *md_template; /* FW template will be stored */
+ void *md_capture_buff; /* FW dump will be stored */
+};
+
+
+
+struct netxen_minidump_template_hdr {
+ u32 entry_type;
+ u32 first_entry_offset;
+ u32 size_of_template;
+ u32 capture_mask;
+ u32 num_of_entries;
+ u32 version;
+ u32 driver_timestamp;
+ u32 checksum;
+ u32 driver_capture_mask;
+ u32 driver_info_word2;
+ u32 driver_info_word3;
+ u32 driver_info_word4;
+ u32 saved_state_array[NX_DUMP_STATE_ARRAY_LEN];
+ u32 capture_size_array[NX_DUMP_CAP_SIZE_ARRAY_LEN];
+ u32 rsvd[];
+};
+
+/* Common Entry Header: Common to All Entry Types */
+/*
+ * Driver Code is for the driver to write some info about the entry.
+ * Currently not used.
+ */
+
+struct netxen_common_entry_hdr {
+ u32 entry_type;
+ u32 entry_size;
+ u32 entry_capture_size;
+ union {
+ struct {
+ u8 entry_capture_mask;
+ u8 entry_code;
+ u8 driver_code;
+ u8 driver_flags;
+ };
+ u32 entry_ctrl_word;
+ };
+};
+
+
+/* Generic Entry Including Header */
+struct netxen_minidump_entry {
+ struct netxen_common_entry_hdr hdr;
+ u32 entry_data00;
+ u32 entry_data01;
+ u32 entry_data02;
+ u32 entry_data03;
+ u32 entry_data04;
+ u32 entry_data05;
+ u32 entry_data06;
+ u32 entry_data07;
+};
+
+/* Read ROM Header */
+struct netxen_minidump_entry_rdrom {
+ struct netxen_common_entry_hdr h;
+ union {
+ struct {
+ u32 select_addr_reg;
+ };
+ u32 rsvd_0;
+ };
+ union {
+ struct {
+ u8 addr_stride;
+ u8 addr_cnt;
+ u16 data_size;
+ };
+ u32 rsvd_1;
+ };
+ union {
+ struct {
+ u32 op_count;
+ };
+ u32 rsvd_2;
+ };
+ union {
+ struct {
+ u32 read_addr_reg;
+ };
+ u32 rsvd_3;
+ };
+ union {
+ struct {
+ u32 write_mask;
+ };
+ u32 rsvd_4;
+ };
+ union {
+ struct {
+ u32 read_mask;
+ };
+ u32 rsvd_5;
+ };
+ u32 read_addr;
+ u32 read_data_size;
+};
+
+
+/* Read CRB and Control Entry Header */
+struct netxen_minidump_entry_crb {
+ struct netxen_common_entry_hdr h;
+ u32 addr;
+ union {
+ struct {
+ u8 addr_stride;
+ u8 state_index_a;
+ u16 poll_timeout;
+ };
+ u32 addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ union {
+ struct {
+ u8 opcode;
+ u8 state_index_v;
+ u8 shl;
+ u8 shr;
+ };
+ u32 control_value;
+ };
+ u32 value_1;
+ u32 value_2;
+ u32 value_3;
+};
+
+/* Read Memory and MN Header */
+struct netxen_minidump_entry_rdmem {
+ struct netxen_common_entry_hdr h;
+ union {
+ struct {
+ u32 select_addr_reg;
+ };
+ u32 rsvd_0;
+ };
+ union {
+ struct {
+ u8 addr_stride;
+ u8 addr_cnt;
+ u16 data_size;
+ };
+ u32 rsvd_1;
+ };
+ union {
+ struct {
+ u32 op_count;
+ };
+ u32 rsvd_2;
+ };
+ union {
+ struct {
+ u32 read_addr_reg;
+ };
+ u32 rsvd_3;
+ };
+ union {
+ struct {
+ u32 cntrl_addr_reg;
+ };
+ u32 rsvd_4;
+ };
+ union {
+ struct {
+ u8 wr_byte0;
+ u8 wr_byte1;
+ u8 poll_mask;
+ u8 poll_cnt;
+ };
+ u32 rsvd_5;
+ };
+ u32 read_addr;
+ u32 read_data_size;
+};
+
+/* Read Cache L1 and L2 Header */
+struct netxen_minidump_entry_cache {
+ struct netxen_common_entry_hdr h;
+ u32 tag_reg_addr;
+ union {
+ struct {
+ u16 tag_value_stride;
+ u16 init_tag_value;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 control_addr;
+ union {
+ struct {
+ u16 write_value;
+ u8 poll_mask;
+ u8 poll_wait;
+ };
+ u32 control_value;
+ };
+ u32 read_addr;
+ union {
+ struct {
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u16 rsvd_1;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+/* Read OCM Header */
+struct netxen_minidump_entry_rdocm {
+ struct netxen_common_entry_hdr h;
+ u32 rsvd_0;
+ union {
+ struct {
+ u32 rsvd_1;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 rsvd_2;
+ u32 rsvd_3;
+ u32 read_addr;
+ union {
+ struct {
+ u32 read_addr_stride;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+/* Read MUX Header */
+struct netxen_minidump_entry_mux {
+ struct netxen_common_entry_hdr h;
+ u32 select_addr;
+ union {
+ struct {
+ u32 rsvd_0;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 select_value;
+ u32 select_value_stride;
+ u32 read_addr;
+ u32 rsvd_1;
+};
+
+/* Read Queue Header */
+struct netxen_minidump_entry_queue {
+ struct netxen_common_entry_hdr h;
+ u32 select_addr;
+ union {
+ struct {
+ u16 queue_id_stride;
+ u16 rsvd_0;
+ };
+ u32 select_addr_cntrl;
+ };
+ u32 data_size;
+ u32 op_count;
+ u32 rsvd_1;
+ u32 rsvd_2;
+ u32 read_addr;
+ union {
+ struct {
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u16 rsvd_3;
+ };
+ u32 read_addr_cntrl;
+ };
+};
+
+struct netxen_dummy_dma {
+ void *addr;
+ dma_addr_t phys_addr;
+};
+
+struct netxen_adapter {
+ struct netxen_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+ struct list_head ip_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 __pad;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 link_changed;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct netxen_adapter_stats stats;
+
+ struct netxen_recv_context recv_ctx;
+ struct nx_host_tx_ring *tx_ring;
+
+ int (*macaddr_set) (struct netxen_adapter *, u8 *);
+ int (*set_mtu) (struct netxen_adapter *, int);
+ int (*set_promisc) (struct netxen_adapter *, u32);
+ void (*set_multi) (struct net_device *);
+ int (*phy_read) (struct netxen_adapter *, u32 reg, u32 *);
+ int (*phy_write) (struct netxen_adapter *, u32 reg, u32 val);
+ int (*init_port) (struct netxen_adapter *, int);
+ int (*stop_port) (struct netxen_adapter *);
+
+ u32 (*crb_read)(struct netxen_adapter *, ulong);
+ int (*crb_write)(struct netxen_adapter *, ulong, u32);
+
+ int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
+ int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
+
+ int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
+
+ u32 (*io_read)(struct netxen_adapter *, void __iomem *);
+ void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *pci_int_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct netxen_dummy_dma dummy_dma;
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ nx_nic_intr_coalesce_t coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /*File fw product offset*/
+ u32 fw_version;
+ const struct firmware *fw;
+ struct netxen_minidump mdump; /* mdump ptr */
+ int fw_mdump_rdy; /* for mdump ready */
+};
+
+int nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val);
+int nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val);
+
+#define NXRD32(adapter, off) \
+ (adapter->crb_read(adapter, off))
+#define NXWR32(adapter, off, val) \
+ (adapter->crb_write(adapter, off, val))
+#define NXRDIO(adapter, addr) \
+ (adapter->io_read(adapter, addr))
+#define NXWRIO(adapter, addr, val) \
+ (adapter->io_write(adapter, addr, val))
+
+int netxen_pcie_sem_lock(struct netxen_adapter *, int, u32);
+void netxen_pcie_sem_unlock(struct netxen_adapter *, int);
+
+#define netxen_rom_lock(a) \
+ netxen_pcie_sem_lock((a), 2, NETXEN_ROM_LOCK_ID)
+#define netxen_rom_unlock(a) \
+ netxen_pcie_sem_unlock((a), 2)
+#define netxen_phy_lock(a) \
+ netxen_pcie_sem_lock((a), 3, NETXEN_PHY_LOCK_ID)
+#define netxen_phy_unlock(a) \
+ netxen_pcie_sem_unlock((a), 3)
+#define netxen_api_lock(a) \
+ netxen_pcie_sem_lock((a), 5, 0)
+#define netxen_api_unlock(a) \
+ netxen_pcie_sem_unlock((a), 5)
+#define netxen_sw_lock(a) \
+ netxen_pcie_sem_lock((a), 6, 0)
+#define netxen_sw_unlock(a) \
+ netxen_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ netxen_pcie_sem_lock((a), 7, NETXEN_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ netxen_pcie_sem_unlock((a), 7)
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter);
+int netxen_nic_wol_supported(struct netxen_adapter *adapter);
+
+/* Functions from netxen_nic_init.c */
+int netxen_init_dummy_dma(struct netxen_adapter *adapter);
+void netxen_free_dummy_dma(struct netxen_adapter *adapter);
+
+int netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter);
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val);
+int netxen_load_firmware(struct netxen_adapter *adapter);
+int netxen_need_fw_reset(struct netxen_adapter *adapter);
+void netxen_request_firmware(struct netxen_adapter *adapter);
+void netxen_release_firmware(struct netxen_adapter *adapter);
+int netxen_pinit_from_rom(struct netxen_adapter *adapter);
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
+int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int netxen_rom_fast_write_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int netxen_flash_unlock(struct netxen_adapter *adapter);
+int netxen_backup_crbinit(struct netxen_adapter *adapter);
+int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
+int netxen_flash_erase_primary(struct netxen_adapter *adapter);
+void netxen_halt_pegs(struct netxen_adapter *adapter);
+
+int netxen_rom_se(struct netxen_adapter *adapter, int addr);
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter);
+void netxen_free_sw_resources(struct netxen_adapter *adapter);
+
+void netxen_setup_hwops(struct netxen_adapter *adapter);
+void __iomem *netxen_get_ioaddr(struct netxen_adapter *, u32);
+
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter);
+void netxen_free_hw_resources(struct netxen_adapter *adapter);
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter);
+void netxen_release_tx_buffers(struct netxen_adapter *adapter);
+
+int netxen_init_firmware(struct netxen_adapter *adapter);
+void netxen_nic_clear_stats(struct netxen_adapter *adapter);
+void netxen_watchdog_task(struct work_struct *work);
+void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+ struct nx_host_rds_ring *rds_ring);
+int netxen_process_cmd_ring(struct netxen_adapter *adapter);
+int netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max);
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter);
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter);
+int netxen_config_rss(struct netxen_adapter *adapter, int enable);
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd);
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
+void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
+void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
+
+int nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg);
+int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
+int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable);
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable);
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter);
+int netxen_setup_minidump(struct netxen_adapter *adapter);
+void netxen_dump_fw(struct netxen_adapter *adapter);
+void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring);
+
+/* Functions from netxen_nic_main.c */
+int netxen_nic_reset_context(struct netxen_adapter *);
+
+int nx_dev_request_reset(struct netxen_adapter *adapter);
+
+/*
+ * NetXen Board information
+ */
+
+#define NETXEN_MAX_SHORT_NAME 32
+struct netxen_brdinfo {
+ int brdtype; /* type of board */
+ long ports; /* max no of physical ports */
+ char short_name[NETXEN_MAX_SHORT_NAME];
+};
+
+struct netxen_dimm_cfg {
+ u8 presence;
+ u8 mem_type;
+ u8 dimm_type;
+ u32 size;
+};
+
+static const struct netxen_brdinfo netxen_boards[] = {
+ {NETXEN_BRDTYPE_P2_SB31_10G_CX4, 1, "XGb CX4"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_HMEZ, 1, "XGb HMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G_IMEZ, 2, "XGb IMEZ"},
+ {NETXEN_BRDTYPE_P2_SB31_10G, 1, "XGb XFP"},
+ {NETXEN_BRDTYPE_P2_SB35_4G, 4, "Quad Gb"},
+ {NETXEN_BRDTYPE_P2_SB31_2G, 2, "Dual Gb"},
+ {NETXEN_BRDTYPE_P3_REF_QG, 4, "Reference Quad Gig "},
+ {NETXEN_BRDTYPE_P3_HMEZ, 2, "Dual XGb HMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_CX4_LP, 2, "Dual XGb CX4 LP"},
+ {NETXEN_BRDTYPE_P3_4_GB, 4, "Quad Gig LP"},
+ {NETXEN_BRDTYPE_P3_IMEZ, 2, "Dual XGb IMEZ"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_PLUS, 2, "Dual XGb SFP+ LP"},
+ {NETXEN_BRDTYPE_P3_10000_BASE_T, 1, "XGB 10G BaseT LP"},
+ {NETXEN_BRDTYPE_P3_XG_LOM, 2, "Dual XGb LOM"},
+ {NETXEN_BRDTYPE_P3_4_GB_MM, 4, "NX3031 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_CT, 2, "NX3031 10 Gigabit Ethernet"},
+ {NETXEN_BRDTYPE_P3_10G_SFP_QT, 2, "Quanta Dual XGb SFP+"},
+ {NETXEN_BRDTYPE_P3_10G_CX4, 2, "Reference Dual CX4 Option"},
+ {NETXEN_BRDTYPE_P3_10G_XFP, 1, "Reference Single XFP Option"}
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(netxen_boards)
+
+static inline int netxen_nic_get_brd_name_by_type(u32 type, char *name)
+{
+ int i, found = 0;
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (netxen_boards[i].brdtype == type) {
+ strcpy(name, netxen_boards[i].short_name);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ strcpy(name, "Unknown");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
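+/* Number of free descriptors between the tx producer and the sw consumer */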
+static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ return find_diff_among(tx_ring->producer,
+ tx_ring->sw_consumer, tx_ring->num_desc);
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+void netxen_change_ringparam(struct netxen_adapter *adapter);
+
+extern const struct ethtool_ops netxen_nic_ethtool_ops;
+
+#endif /* __NETXEN_NIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
new file mode 100644
index 000000000..2fcbcecb4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -0,0 +1,918 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#include "netxen_nic_hw.h"
+#include "netxen_nic.h"
+
+#define NXHAL_VERSION 1
+
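+/*
+ * Poll the CDRP response register until the firmware posts a response,
+ * giving up after NX_OS_CRB_RETRY_COUNT polls (~1 ms apart).
+ */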
+static u32
+netxen_poll_rsp(struct netxen_adapter *adapter)
+{
+ u32 rsp = NX_CDRP_RSP_OK;
+ int timeout = 0;
+
+ do {
+ /* give at least 1 ms for the firmware to respond */
+ msleep(1);
+
+ if (++timeout > NX_OS_CRB_RETRY_COUNT)
+ return NX_CDRP_RSP_TIMEOUT;
+
+ rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
+ } while (!NX_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
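+/*
+ * Issue a control command through the CDRP mailbox: take the API
+ * semaphore, write the signature and arguments, kick the command
+ * register, then poll for the response and read back any requested
+ * response arguments.
+ */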
+static u32
+netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
+{
+ u32 rsp;
+ u32 signature = 0;
+ u32 rcode = NX_RCODE_SUCCESS;
+
+ signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
+ NXHAL_VERSION);
+ /* Acquire semaphore before accessing CRB */
+ if (netxen_api_lock(adapter))
+ return NX_RCODE_TIMEOUT;
+
+ NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
+
+ NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
+
+ NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
+
+ NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
+
+ NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));
+
+ rsp = netxen_poll_rsp(adapter);
+
+ if (rsp == NX_CDRP_RSP_TIMEOUT) {
+ printk(KERN_ERR "%s: card response timeout.\n",
+ netxen_nic_driver_name);
+
+ rcode = NX_RCODE_TIMEOUT;
+ } else if (rsp == NX_CDRP_RSP_FAIL) {
+ rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+
+ printk(KERN_ERR "%s: failed card response code:0x%x\n",
+ netxen_nic_driver_name, rcode);
+ } else if (rsp == NX_CDRP_RSP_OK) {
+ cmd->rsp.cmd = NX_RCODE_SUCCESS;
+ if (cmd->rsp.arg2)
+ cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
+ if (cmd->rsp.arg3)
+ cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
+ }
+
+ if (cmd->rsp.arg1)
+ cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
+ /* Release semaphore */
+ netxen_api_unlock(adapter);
+
+ return rcode;
+}
+
+static int
+netxen_get_minidump_template_size(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_args cmd;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
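+ /* mark all response args non-zero so netxen_issue_cmd() reads them back */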
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ netxen_issue_cmd(adapter, &cmd);
+ if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Can't get template size %d\n", cmd.rsp.cmd);
+ return -EIO;
+ }
+ adapter->mdump.md_template_size = cmd.rsp.arg2;
+ adapter->mdump.md_template_ver = cmd.rsp.arg3;
+ return 0;
+}
+
+static int
+netxen_get_minidump_template(struct netxen_adapter *adapter)
+{
+ dma_addr_t md_template_addr;
+ void *addr;
+ u32 size;
+ struct netxen_cmd_args cmd;
+ size = adapter->mdump.md_template_size;
+
+ if (size == 0) {
+ dev_err(&adapter->pdev->dev, "Can not capture Minidump "
+ "template. Invalid template size.\n");
+ return NX_RCODE_INVALID_ARGS;
+ }
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &md_template_addr, GFP_KERNEL);
+ if (!addr) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
+ return -ENOMEM;
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
+ cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
+ cmd.req.arg1 = LSD(md_template_addr);
+ cmd.req.arg2 = MSD(md_template_addr);
+ cmd.req.arg3 |= size;
+ netxen_issue_cmd(adapter, &cmd);
+
+ if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
+ memcpy(adapter->mdump.md_template, addr, size);
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get minidump template, err_code : %d, requested_size : %d, actual_size : %d\n",
+ cmd.rsp.cmd, size, cmd.rsp.arg2);
+ }
+ dma_free_coherent(&adapter->pdev->dev, size, addr, md_template_addr);
+ return 0;
+}
+
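+/*
+ * Fold a 64-bit sum of the template words into 32 bits and return its
+ * complement; a valid template checksums to zero.
+ */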
+static u32
+netxen_check_template_checksum(struct netxen_adapter *adapter)
+{
+ u64 sum = 0;
+ u32 *buff = adapter->mdump.md_template;
+ int count = adapter->mdump.md_template_size / sizeof(uint32_t);
+
+ while (count-- > 0)
+ sum += *buff++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+
+ return ~sum;
+}
+
+int
+netxen_setup_minidump(struct netxen_adapter *adapter)
+{
+ int err = 0, i;
+ u32 *template, *tmp_buf;
+ err = netxen_get_minidump_template_size(adapter);
+ if (err) {
+ adapter->mdump.fw_supports_md = 0;
+ if ((err == NX_RCODE_CMD_INVALID) ||
+ (err == NX_RCODE_CMD_NOT_IMPL)) {
+ dev_info(&adapter->pdev->dev,
+ "Flashed firmware version does not support minidump, minimum version required is [ %u.%u.%u ]\n",
+ NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
+ NX_MD_SUPPORT_SUBVERSION);
+ }
+ return err;
+ }
+
+ if (!adapter->mdump.md_template_size) {
+ dev_err(&adapter->pdev->dev, "Error : Invalid template size "
+ ",should be non-zero.\n");
+ return -EIO;
+ }
+ adapter->mdump.md_template =
+ kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);
+
+ if (!adapter->mdump.md_template)
+ return -ENOMEM;
+
+ err = netxen_get_minidump_template(adapter);
+ if (err) {
+ if (err == NX_RCODE_CMD_NOT_IMPL)
+ adapter->mdump.fw_supports_md = 0;
+ goto free_template;
+ }
+
+ if (netxen_check_template_checksum(adapter)) {
+ dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
+ err = -EIO;
+ goto free_template;
+ }
+
+ adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
+ tmp_buf = (u32 *) adapter->mdump.md_template;
+ template = (u32 *) adapter->mdump.md_template;
+ for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
+ *template++ = __le32_to_cpu(*tmp_buf++);
+ adapter->mdump.md_capture_buff = NULL;
+ adapter->mdump.fw_supports_md = 1;
+ adapter->mdump.md_enabled = 0;
+
+ return err;
+
+free_template:
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+ return err;
+}
+
+int
+nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
+{
+ u32 rcode = NX_RCODE_SUCCESS;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = mtu;
+ cmd.req.arg3 = 0;
+
+ if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
+ rcode = netxen_issue_cmd(adapter, &cmd);
+
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+int
+nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
+ u32 speed, u32 duplex, u32 autoneg)
+{
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
+ cmd.req.arg1 = speed;
+ cmd.req.arg2 = duplex;
+ cmd.req.arg3 = autoneg;
+ return netxen_issue_cmd(adapter, &cmd);
+}
+
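+/*
+ * Describe the rx (RDS) and status (SDS) rings to the firmware in a
+ * DMA-coherent request block, create the rx context, and map the CRB
+ * offsets returned for the producer, consumer and interrupt-mask registers.
+ */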
+static int
+nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
+{
+ void *addr;
+ nx_hostrq_rx_ctx_t *prq;
+ nx_cardrsp_rx_ctx_t *prsp;
+ nx_hostrq_rds_ring_t *prq_rds;
+ nx_hostrq_sds_ring_t *prq_sds;
+ nx_cardrsp_rds_ring_t *prsp_rds;
+ nx_cardrsp_sds_ring_t *prsp_sds;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_cmd_args cmd;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+
+ int err;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = addr;
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
+ cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);
+
+ if (adapter->flags & NETXEN_FW_MSS_CAP)
+ cap |= NX_CAP0_HW_LRO_MSS;
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
+ err = netxen_issue_cmd(adapter, &cmd);
+ if (err) {
+ printk(KERN_WARNING
+ "Failed to create rx ctx in firmware%d\n", err);
+ goto out_free_rsp;
+ }
+
+ prsp_rds = ((nx_cardrsp_rds_ring_t *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((nx_cardrsp_sds_ring_t *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = recv_ctx->context_id;
+ cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;
+
+ if (netxen_issue_cmd(adapter, &cmd)) {
+ printk(KERN_WARNING
+ "%s: Failed to destroy rx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
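+/*
+ * Describe the tx (CDS) ring and its consumer index location to the
+ * firmware, create the tx context, and map the returned command-producer
+ * CRB register.
+ */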
+static int
+nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
+{
+ nx_hostrq_tx_ctx_t *prq;
+ nx_hostrq_cds_ring_t *prq_cds;
+ nx_cardrsp_tx_ctx_t *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err = 0;
+ u64 offset, phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_cmd_args cmd;
+
+ rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ prq = rq_addr;
+
+ prsp = rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+
+ prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);
+
+ offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
+ prq->cmd_cons_dma_addr = cpu_to_le64(offset);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = (u32)(phys_addr >> 32);
+ cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
+ cmd.req.arg3 = rq_size;
+ cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
+ err = netxen_issue_cmd(adapter, &cmd);
+
+ if (err == NX_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+ NETXEN_NIC_REG(temp - 0x200));
+#if 0
+ adapter->tx_state =
+ le32_to_cpu(prsp->host_ctx_state);
+#endif
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ printk(KERN_WARNING
+ "Failed to create tx ctx in firmware%d\n", err);
+ err = -EIO;
+ }
+
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
+
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = adapter->tx_context_id;
+ cmd.req.arg2 = NX_DESTROY_CTX_RESET;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;
+ if (netxen_issue_cmd(adapter, &cmd)) {
+ printk(KERN_WARNING
+ "%s: Failed to destroy tx ctx in firmware\n",
+ netxen_nic_driver_name);
+ }
+}
+
+int
+nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
+{
+ u32 rcode;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = reg;
+ cmd.req.arg2 = 0;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
+ cmd.rsp.arg1 = 1;
+ rcode = netxen_issue_cmd(adapter, &cmd);
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ if (val == NULL)
+ return -EIO;
+
+ *val = cmd.rsp.arg1;
+ return 0;
+}
+
+int
+nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
+{
+ u32 rcode;
+ struct netxen_cmd_args cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.req.arg1 = reg;
+ cmd.req.arg2 = val;
+ cmd.req.arg3 = 0;
+ cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
+ rcode = netxen_issue_cmd(adapter, &cmd);
+ if (rcode != NX_RCODE_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
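+/*
+ * Per-PCI-function CRB registers used for the legacy (P2) context
+ * handshake: {context address low, signature, context address high}.
+ */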
+static u64 ctx_addr_sig_regs[][3] = {
+ {NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
+ {NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
+ {NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
+ {NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
+};
+
+#define CRB_CTX_ADDR_REG_LO(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][0])
+#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
+#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
+
+#define lower32(x) ((u32)((x) & 0xffffffff))
+#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
+
+static struct netxen_recv_crb recv_crb_registers[] = {
+ /* Instance 0 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x100),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x110),
+ /* LRO */
+ NETXEN_NIC_REG(0x120)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x138),
+ NETXEN_NIC_REG_2(0x000),
+ NETXEN_NIC_REG_2(0x004),
+ NETXEN_NIC_REG_2(0x008),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_0,
+ NETXEN_NIC_REG_2(0x044),
+ NETXEN_NIC_REG_2(0x048),
+ NETXEN_NIC_REG_2(0x04c),
+ },
+ },
+ /* Instance 1 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x144),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x154),
+ /* LRO */
+ NETXEN_NIC_REG(0x164)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x17c),
+ NETXEN_NIC_REG_2(0x020),
+ NETXEN_NIC_REG_2(0x024),
+ NETXEN_NIC_REG_2(0x028),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_1,
+ NETXEN_NIC_REG_2(0x064),
+ NETXEN_NIC_REG_2(0x068),
+ NETXEN_NIC_REG_2(0x06c),
+ },
+ },
+ /* Instance 2 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x1d8),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x1f8),
+ /* LRO */
+ NETXEN_NIC_REG(0x208)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x220),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_2,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ },
+ /* Instance 3 */
+ {
+ /* crb_rcv_producer: */
+ {
+ NETXEN_NIC_REG(0x22c),
+ /* Jumbo frames */
+ NETXEN_NIC_REG(0x23c),
+ /* LRO */
+ NETXEN_NIC_REG(0x24c)
+ },
+ /* crb_sts_consumer: */
+ {
+ NETXEN_NIC_REG(0x264),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ /* sw_int_mask */
+ {
+ CRB_SW_INT_MASK_3,
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ NETXEN_NIC_REG_2(0x03c),
+ },
+ },
+};
+
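+/*
+ * Legacy (P2) context setup: fill the shared ring-context descriptor
+ * directly and hand its DMA address to the firmware through the per-port
+ * address and signature CRB registers.
+ */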
+static int
+netxen_init_old_ctx(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+ int port = adapter->portnum;
+ struct netxen_ring_ctx *hwctx;
+ u32 signature;
+
+ tx_ring = adapter->tx_ring;
+ recv_ctx = &adapter->recv_ctx;
+ hwctx = recv_ctx->hwctx;
+
+ hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
+ hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ hwctx->rcv_rings[ring].addr =
+ cpu_to_le64(rds_ring->phys_addr);
+ hwctx->rcv_rings[ring].size =
+ cpu_to_le32(rds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (ring == 0) {
+ hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
+ }
+ hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
+ hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
+ hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
+ }
+ hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);
+
+ signature = (adapter->max_sds_rings > 1) ?
+ NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
+
+ NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
+ lower32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
+ upper32(recv_ctx->phys_addr));
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ signature | port);
+ return 0;
+}
+
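+/*
+ * Allocate the DMA-coherent ring context, tx descriptor ring and rx/status
+ * descriptor rings, then create the hardware context: by firmware command
+ * on P3, or through the legacy CRB handshake on P2.
+ */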
+int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
+{
+ void *addr;
+ int err = 0;
+ int ring;
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ int port = adapter->portnum;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ addr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ &recv_ctx->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate hw context\n");
+ return -ENOMEM;
+ }
+
+ recv_ctx->hwctx = addr;
+ recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
+ recv_ctx->hwctx->cmd_consumer_offset =
+ cpu_to_le64(recv_ctx->phys_addr +
+ sizeof(struct netxen_ring_ctx));
+ tx_ring->hw_consumer =
+ (__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));
+
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
+ netdev->name);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate rds ring [%d]\n",
+ netdev->name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = addr;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ rds_ring->crb_rcv_producer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_rcv_producer[ring]);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate sds ring [%d]\n",
+ netdev->name, ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = addr;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ sds_ring->crb_sts_consumer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_sts_consumer[ring]);
+
+ sds_ring->crb_intr_mask =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].sw_int_mask[ring]);
+ }
+ }
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
+ goto done;
+ err = nx_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = nx_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ } else {
+ err = netxen_init_old_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ }
+
+done:
+ return 0;
+
+err_out_free:
+ netxen_free_hw_resources(adapter);
+ return err;
+}
+
+void netxen_free_hw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+
+ int port = adapter->portnum;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
+ goto done;
+
+ nx_fw_cmd_destroy_rx_ctx(adapter);
+ nx_fw_cmd_destroy_tx_ctx(adapter);
+ } else {
+ netxen_api_lock(adapter);
+ NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
+ NETXEN_CTX_D3_RESET | port);
+ netxen_api_unlock(adapter);
+ }
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+
+done:
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->hwctx != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
+ recv_ctx->hwctx, recv_ctx->phys_addr);
+ recv_ctx->hwctx = NULL;
+ }
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
new file mode 100644
index 000000000..8c4cb910e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -0,0 +1,945 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct netxen_nic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
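+/* {size, offset} pair used to fetch a statistic from struct netxen_adapter */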
+#define NETXEN_NIC_STAT(m) sizeof(((struct netxen_adapter *)0)->m), \
+ offsetof(struct netxen_adapter, m)
+
+#define NETXEN_NIC_PORT_WINDOW 0x10000
+#define NETXEN_NIC_INVALID_DATA 0xDEADBEEF
+
+static const struct netxen_nic_stats netxen_nic_gstrings_stats[] = {
+ {"xmit_called", NETXEN_NIC_STAT(stats.xmitcalled)},
+ {"xmit_finished", NETXEN_NIC_STAT(stats.xmitfinished)},
+ {"rx_dropped", NETXEN_NIC_STAT(stats.rxdropped)},
+ {"tx_dropped", NETXEN_NIC_STAT(stats.txdropped)},
+ {"csummed", NETXEN_NIC_STAT(stats.csummed)},
+ {"rx_pkts", NETXEN_NIC_STAT(stats.rx_pkts)},
+ {"lro_pkts", NETXEN_NIC_STAT(stats.lro_pkts)},
+ {"rx_bytes", NETXEN_NIC_STAT(stats.rxbytes)},
+ {"tx_bytes", NETXEN_NIC_STAT(stats.txbytes)},
+};
+
+#define NETXEN_NIC_STATS_LEN ARRAY_SIZE(netxen_nic_gstrings_stats)
+
+static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline"
+};
+
+#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
+
+#define NETXEN_NIC_REGS_COUNT 30
+#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
+#define NETXEN_MAX_EEPROM_LEN 1024
+
+static int netxen_nic_get_eeprom_len(struct net_device *dev)
+{
+ return NETXEN_FLASH_TOTAL_SIZE;
+}
+
+static void
+netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 fw_major = 0;
+ u32 fw_minor = 0;
+ u32 fw_build = 0;
+
+ strscpy(drvinfo->driver, netxen_nic_driver_name,
+ sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static int
+netxen_nic_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+ u32 supported, advertising;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ cmd->base.port = PORT_TP;
+
+ cmd->base.speed = adapter->link_speed;
+ cmd->base.duplex = adapter->link_duplex;
+ cmd->base.autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ u32 val;
+
+ val = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+ if (val == NETXEN_PORT_MODE_802_3_AP) {
+ supported = SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ cmd->base.speed = adapter->link_speed;
+ cmd->base.autoneg = adapter->link_autoneg;
+ cmd->base.duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ cmd->base.port = PORT_TP;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ u16 pcifn = adapter->ahw.pci_func;
+
+ val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ cmd->base.speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
+ } else
+ cmd->base.speed = SPEED_10000;
+
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ cmd->base.phy_address = adapter->physical_port;
+
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ case NETXEN_BRDTYPE_P2_SB31_2G:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ fallthrough;
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ cmd->base.port = PORT_TP;
+ cmd->base.autoneg = (adapter->ahw.board_type ==
+ NETXEN_BRDTYPE_P2_SB31_10G_CX4) ?
+ (AUTONEG_DISABLE) : (adapter->link_autoneg);
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ supported |= SUPPORTED_MII;
+ advertising |= ADVERTISED_MII;
+ cmd->base.port = PORT_MII;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ advertising |= ADVERTISED_TP;
+ supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ fallthrough;
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ cmd->base.port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ cmd->base.port = PORT_TP;
+ }
+ break;
+ default:
+ printk(KERN_ERR "netxen-nic: Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ cmd->base.port = PORT_TP;
+ break;
+ default:
+ cmd->base.port = -1;
+ }
+ }
+
+ if (!netif_running(dev) || !adapter->ahw.linkup) {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ }
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static int
+netxen_nic_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 speed = cmd->base.speed;
+ int ret;
+
+ if (adapter->ahw.port_type != NETXEN_NIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
+ return -EOPNOTSUPP;
+
+ ret = nx_fw_cmd_set_gbe_port(adapter, speed, cmd->base.duplex,
+ cmd->base.autoneg);
+ if (ret == NX_RCODE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+
+ adapter->link_speed = speed;
+ adapter->link_duplex = cmd->base.duplex;
+ adapter->link_autoneg = cmd->base.autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static int netxen_nic_get_regs_len(struct net_device *dev)
+{
+ return NETXEN_NIC_REGS_LEN;
+}
+
+static void
+netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
+
+ memset(p, 0, NETXEN_NIC_REGS_LEN);
+
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
+ }
+}
+
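+/* Self-test helper: returns 0 if the link is up, 1 otherwise */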
+static u32 netxen_nic_test_link(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 val, port;
+
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
+ return (val == XG_LINK_UP) ? 0 : 1;
+ }
+}
+
+static int
+netxen_nic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = netxen_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+netxen_nic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+}
+
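+/*
+ * Clamp the requested ring size into [min, max] and round it up to a
+ * power of two, logging if the value had to be adjusted.
+ */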
+static u32
+netxen_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ netxen_nic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
+
+static int
+netxen_nic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EOPNOTSUPP;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+ max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ num_rxd = netxen_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = netxen_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = netxen_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return netxen_nic_reset_context(adapter);
+}
+
+static void
+netxen_nic_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ __u32 val;
+ int port = adapter->physical_port;
+
+ pause->autoneg = 0;
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = netxen_gb_get_rx_flowctl(val);
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(netxen_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(netxen_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(netxen_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(netxen_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(netxen_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(netxen_xg_get_xg1_mask(val));
+ } else {
+ printk(KERN_ERR"%s: Unknown board type: %x\n",
+ netxen_nic_driver_name, adapter->ahw.port_type);
+ }
+}
+
+static int
+netxen_nic_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ __u32 val;
+ int port = adapter->physical_port;
+
+ /* not supported */
+ if (pause->autoneg)
+ return -EINVAL;
+
+ /* read mode */
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = NXRD32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ netxen_gb_rx_flowctl(val);
+ else
+ netxen_gb_unset_rx_flowctl(val);
+
+ NXWR32(adapter, NETXEN_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set autoneg */
+ val = NXRD32(adapter, NETXEN_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb0_mask(val);
+ else
+ netxen_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb1_mask(val);
+ else
+ netxen_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb2_mask(val);
+ else
+ netxen_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ netxen_gb_unset_gb3_mask(val);
+ else
+ netxen_gb_set_gb3_mask(val);
+ break;
+ }
+ NXWR32(adapter, NETXEN_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ if ((port < 0) || (port >= NETXEN_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = NXRD32(adapter, NETXEN_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ netxen_xg_unset_xg0_mask(val);
+ else
+ netxen_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ netxen_xg_unset_xg1_mask(val);
+ else
+ netxen_xg_set_xg1_mask(val);
+ }
+ NXWR32(adapter, NETXEN_NIU_XG_PAUSE_CTL, val);
+ } else {
+ printk(KERN_ERR "%s: Unknown board type: %x\n",
+ netxen_nic_driver_name,
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int netxen_nic_reg_test(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = NXRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int netxen_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return NETXEN_NIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return NETXEN_NIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void
+netxen_nic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(uint64_t) * NETXEN_NIC_TEST_LEN);
+ if ((data[0] = netxen_nic_reg_test(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* link test */
+ if ((data[1] = (u64) netxen_nic_test_link(dev)))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+netxen_nic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *netxen_nic_gstrings_test,
+ NETXEN_NIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ netxen_nic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+netxen_nic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < NETXEN_NIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ netxen_nic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (netxen_nic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *) p : *(u32 *) p;
+ }
+}
+
+static void
+netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg = 0;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg = 0;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EOPNOTSUPP;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+ NXWR32(adapter, NETXEN_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int netxen_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return an error if unsupported values or
+ * parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ netxen_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int netxen_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (!NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
+static int
+netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+ if (adapter->fw_mdump_rdy)
+ dump->len = mdump->md_dump_size;
+ else
+ dump->len = 0;
+
+ if (!mdump->md_enabled)
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = mdump->md_capture_mask;
+
+ dump->version = adapter->fw_version;
+ return 0;
+}
+
+/* Fw dump levels */
+static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
+static int
+netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ int i;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+
+ switch (val->flag) {
+ case NX_FORCE_FW_DUMP_KEY:
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ return 0;
+ }
+ if (adapter->fw_mdump_rdy) {
+ netdev_info(netdev, "Previous dump not cleared, not forcing dump\n");
+ return 0;
+ }
+ netdev_info(netdev, "Forcing a fw dump\n");
+ nx_dev_request_reset(adapter);
+ break;
+ case NX_DISABLE_FW_DUMP:
+ if (mdump->md_enabled) {
+ netdev_info(netdev, "Disabling FW Dump\n");
+ mdump->md_enabled = 0;
+ }
+ break;
+ case NX_ENABLE_FW_DUMP:
+ if (!mdump->md_enabled) {
+ netdev_info(netdev, "Enabling FW dump\n");
+ mdump->md_enabled = 1;
+ }
+ break;
+ case NX_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing FW reset\n");
+ nx_dev_request_reset(adapter);
+ adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+ break;
+ default:
+ for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
+ if (val->flag == FW_DUMP_LEVELS[i]) {
+ mdump->md_capture_mask = val->flag;
+ netdev_info(netdev,
+ "Driver mask changed to: 0x%x\n",
+ mdump->md_capture_mask);
+ return 0;
+ }
+ }
+ netdev_info(netdev,
+ "Invalid dump level: 0x%x\n", val->flag);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+netxen_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ int i, copy_sz;
+ u32 *hdr_ptr, *data;
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netxen_minidump *mdump = &adapter->mdump;
+
+ if (!adapter->fw_mdump_rdy) {
+ netdev_info(netdev, "Dump not available\n");
+ return -EINVAL;
+ }
+ /* Copy template header first */
+ copy_sz = mdump->md_template_size;
+ hdr_ptr = (u32 *) mdump->md_template;
+ data = buffer;
+ for (i = 0; i < copy_sz/sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz,
+ mdump->md_capture_buff + mdump->md_template_size,
+ mdump->md_capture_size);
+ dump->len = copy_sz + mdump->md_capture_size;
+ dump->flag = mdump->md_capture_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(mdump->md_capture_buff);
+ mdump->md_capture_buff = NULL;
+ adapter->fw_mdump_rdy = 0;
+ netdev_info(netdev, "extracted the fw dump Successfully\n");
+ return 0;
+}
+
+const struct ethtool_ops netxen_nic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_drvinfo = netxen_nic_get_drvinfo,
+ .get_regs_len = netxen_nic_get_regs_len,
+ .get_regs = netxen_nic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = netxen_nic_get_eeprom_len,
+ .get_eeprom = netxen_nic_get_eeprom,
+ .get_ringparam = netxen_nic_get_ringparam,
+ .set_ringparam = netxen_nic_set_ringparam,
+ .get_pauseparam = netxen_nic_get_pauseparam,
+ .set_pauseparam = netxen_nic_set_pauseparam,
+ .get_wol = netxen_nic_get_wol,
+ .set_wol = netxen_nic_set_wol,
+ .self_test = netxen_nic_diag_test,
+ .get_strings = netxen_nic_get_strings,
+ .get_ethtool_stats = netxen_nic_get_ethtool_stats,
+ .get_sset_count = netxen_get_sset_count,
+ .get_coalesce = netxen_get_intr_coalesce,
+ .set_coalesce = netxen_set_intr_coalesce,
+ .get_dump_flag = netxen_get_dump_flag,
+ .get_dump_data = netxen_get_dump_data,
+ .set_dump = netxen_set_dump,
+ .get_link_ksettings = netxen_nic_get_link_ksettings,
+ .set_link_ksettings = netxen_nic_set_link_ksettings,
+};
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
new file mode 100644
index 000000000..09b33e182
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hdr.h
@@ -0,0 +1,1063 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __NETXEN_NIC_HDR_H_
+#define __NETXEN_NIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+typedef __le32 netxen_crbword_t; /* single word in CRB space */
+
+enum {
+ NETXEN_HW_H0_CH_HUB_ADR = 0x05,
+ NETXEN_HW_H1_CH_HUB_ADR = 0x0E,
+ NETXEN_HW_H2_CH_HUB_ADR = 0x03,
+ NETXEN_HW_H3_CH_HUB_ADR = 0x01,
+ NETXEN_HW_H4_CH_HUB_ADR = 0x06,
+ NETXEN_HW_H5_CH_HUB_ADR = 0x07,
+ NETXEN_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ NETXEN_HW_MN_CRB_AGT_ADR = 0x15,
+ NETXEN_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ NETXEN_HW_PS_CRB_AGT_ADR = 0x73,
+ NETXEN_HW_SS_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ NETXEN_HW_QMS_CRB_AGT_ADR = 0x00,
+ NETXEN_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ NETXEN_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ NETXEN_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ NETXEN_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ NETXEN_HW_C2C0_CRB_AGT_ADR = 0x58,
+ NETXEN_HW_C2C1_CRB_AGT_ADR = 0x59,
+ NETXEN_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ NETXEN_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ NETXEN_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ NETXEN_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ NETXEN_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ NETXEN_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ NETXEN_HW_NIU_CRB_AGT_ADR = 0x31,
+ NETXEN_HW_I2C0_CRB_AGT_ADR = 0x19,
+ NETXEN_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ NETXEN_HW_SN_CRB_AGT_ADR = 0x10,
+ NETXEN_HW_I2Q_CRB_AGT_ADR = 0x20,
+ NETXEN_HW_LPC_CRB_AGT_ADR = 0x22,
+ NETXEN_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ NETXEN_HW_QM_CRB_AGT_ADR = 0x66,
+ NETXEN_HW_SQG0_CRB_AGT_ADR = 0x60,
+ NETXEN_HW_SQG1_CRB_AGT_ADR = 0x61,
+ NETXEN_HW_SQG2_CRB_AGT_ADR = 0x62,
+ NETXEN_HW_SQG3_CRB_AGT_ADR = 0x63,
+ NETXEN_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ NETXEN_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ NETXEN_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ NETXEN_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ NETXEN_HW_PH_CRB_AGT_ADR = 0x1A,
+ NETXEN_HW_SRE_CRB_AGT_ADR = 0x50,
+ NETXEN_HW_EG_CRB_AGT_ADR = 0x51,
+ NETXEN_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ NETXEN_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGN1_CRB_AGT_ADR,
+ NETXEN_HW_PEGN2_CRB_AGT_ADR,
+ NETXEN_HW_PEGN3_CRB_AGT_ADR,
+ NETXEN_HW_PEGNI_CRB_AGT_ADR,
+ NETXEN_HW_PEGND_CRB_AGT_ADR,
+ NETXEN_HW_PEGNC_CRB_AGT_ADR,
+ NETXEN_HW_PEGR0_CRB_AGT_ADR,
+ NETXEN_HW_PEGR1_CRB_AGT_ADR,
+ NETXEN_HW_PEGR2_CRB_AGT_ADR,
+ NETXEN_HW_PEGR3_CRB_AGT_ADR,
+ NETXEN_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ NETXEN_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ NETXEN_HW_PEGS1_CRB_AGT_ADR,
+ NETXEN_HW_PEGS2_CRB_AGT_ADR,
+ NETXEN_HW_PEGS3_CRB_AGT_ADR,
+ NETXEN_HW_PEGSI_CRB_AGT_ADR,
+ NETXEN_HW_PEGSD_CRB_AGT_ADR,
+ NETXEN_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ NETXEN_HW_CAS0_CRB_AGT_ADR = 0x46,
+ NETXEN_HW_CAS1_CRB_AGT_ADR = 0x47,
+ NETXEN_HW_CAS2_CRB_AGT_ADR = 0x48,
+ NETXEN_HW_CAS3_CRB_AGT_ADR = 0x49,
+ NETXEN_HW_NCM_CRB_AGT_ADR = 0x16,
+ NETXEN_HW_TMR_CRB_AGT_ADR = 0x17,
+ NETXEN_HW_XDMA_CRB_AGT_ADR = 0x05,
+ NETXEN_HW_OCM0_CRB_AGT_ADR = 0x06,
+ NETXEN_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define NETXEN_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ NETXEN_HW_PX_MAP_CRB_PH = 0,
+ NETXEN_HW_PX_MAP_CRB_PS,
+ NETXEN_HW_PX_MAP_CRB_MN,
+ NETXEN_HW_PX_MAP_CRB_MS,
+ NETXEN_HW_PX_MAP_CRB_PGR1,
+ NETXEN_HW_PX_MAP_CRB_SRE,
+ NETXEN_HW_PX_MAP_CRB_NIU,
+ NETXEN_HW_PX_MAP_CRB_QMN,
+ NETXEN_HW_PX_MAP_CRB_SQN0,
+ NETXEN_HW_PX_MAP_CRB_SQN1,
+ NETXEN_HW_PX_MAP_CRB_SQN2,
+ NETXEN_HW_PX_MAP_CRB_SQN3,
+ NETXEN_HW_PX_MAP_CRB_QMS,
+ NETXEN_HW_PX_MAP_CRB_SQS0,
+ NETXEN_HW_PX_MAP_CRB_SQS1,
+ NETXEN_HW_PX_MAP_CRB_SQS2,
+ NETXEN_HW_PX_MAP_CRB_SQS3,
+ NETXEN_HW_PX_MAP_CRB_PGN0,
+ NETXEN_HW_PX_MAP_CRB_PGN1,
+ NETXEN_HW_PX_MAP_CRB_PGN2,
+ NETXEN_HW_PX_MAP_CRB_PGN3,
+ NETXEN_HW_PX_MAP_CRB_PGND,
+ NETXEN_HW_PX_MAP_CRB_PGNI,
+ NETXEN_HW_PX_MAP_CRB_PGS0,
+ NETXEN_HW_PX_MAP_CRB_PGS1,
+ NETXEN_HW_PX_MAP_CRB_PGS2,
+ NETXEN_HW_PX_MAP_CRB_PGS3,
+ NETXEN_HW_PX_MAP_CRB_PGSD,
+ NETXEN_HW_PX_MAP_CRB_PGSI,
+ NETXEN_HW_PX_MAP_CRB_SN,
+ NETXEN_HW_PX_MAP_CRB_PGR2,
+ NETXEN_HW_PX_MAP_CRB_EG,
+ NETXEN_HW_PX_MAP_CRB_PH2,
+ NETXEN_HW_PX_MAP_CRB_PS2,
+ NETXEN_HW_PX_MAP_CRB_CAM,
+ NETXEN_HW_PX_MAP_CRB_CAS0,
+ NETXEN_HW_PX_MAP_CRB_CAS1,
+ NETXEN_HW_PX_MAP_CRB_CAS2,
+ NETXEN_HW_PX_MAP_CRB_C2C0,
+ NETXEN_HW_PX_MAP_CRB_C2C1,
+ NETXEN_HW_PX_MAP_CRB_TIMR,
+ NETXEN_HW_PX_MAP_CRB_PGR3,
+ NETXEN_HW_PX_MAP_CRB_RPMX1,
+ NETXEN_HW_PX_MAP_CRB_RPMX2,
+ NETXEN_HW_PX_MAP_CRB_RPMX3,
+ NETXEN_HW_PX_MAP_CRB_RPMX4,
+ NETXEN_HW_PX_MAP_CRB_RPMX5,
+ NETXEN_HW_PX_MAP_CRB_RPMX6,
+ NETXEN_HW_PX_MAP_CRB_RPMX7,
+ NETXEN_HW_PX_MAP_CRB_XDMA,
+ NETXEN_HW_PX_MAP_CRB_I2Q,
+ NETXEN_HW_PX_MAP_CRB_ROMUSB,
+ NETXEN_HW_PX_MAP_CRB_CAS3,
+ NETXEN_HW_PX_MAP_CRB_RPMX0,
+ NETXEN_HW_PX_MAP_CRB_RPMX8,
+ NETXEN_HW_PX_MAP_CRB_RPMX9,
+ NETXEN_HW_PX_MAP_CRB_OCM0,
+ NETXEN_HW_PX_MAP_CRB_OCM1,
+ NETXEN_HW_PX_MAP_CRB_SMB,
+ NETXEN_HW_PX_MAP_CRB_I2C0,
+ NETXEN_HW_PX_MAP_CRB_I2C1,
+ NETXEN_HW_PX_MAP_CRB_LPC,
+ NETXEN_HW_PX_MAP_CRB_PGNC,
+ NETXEN_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MN \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PH \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_PH_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_MS \
+ ((NETXEN_HW_H0_CH_HUB_ADR << 7) | NETXEN_HW_MS_CRB_AGT_ADR)
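+
+/*
+ * Worked example: NETXEN_HW_CRB_HUB_AGT_ADR_MN expands to
+ * (0x05 << 7) | 0x15 = 0x295, i.e. the 7-bit agent id sits in bits [6:0]
+ * and the hub id starts at bit 7 of the combined value.
+ */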
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_PS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMS \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_QMS_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SQGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_C2C1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX7_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_RPMX9_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SMB \
+ ((NETXEN_HW_H1_CH_HUB_ADR << 7) | NETXEN_HW_SMB_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_NIU \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_NIU_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((NETXEN_HW_H2_CH_HUB_ADR << 7) | NETXEN_HW_I2C1_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SRE \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SRE_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_EG \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_EG_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_QMN \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_QM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_SQG3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX5_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX6_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_RPMX8_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((NETXEN_HW_H3_CH_HUB_ADR << 7) | NETXEN_HW_CAS3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGND \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGND_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGN4_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGNC_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((NETXEN_HW_H4_CH_HUB_ADR << 7) | NETXEN_HW_PEGR3_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSI_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSD_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS2_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGS3_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((NETXEN_HW_H5_CH_HUB_ADR << 7) | NETXEN_HW_PEGSC_CRB_AGT_ADR)
+
+#define NETXEN_HW_CRB_HUB_AGT_ADR_CAM \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_NCM_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_TMR_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_XDMA_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_SN \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_SN_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_I2Q_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_ROMUSB_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM0_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_OCM1_CRB_AGT_ADR)
+#define NETXEN_HW_CRB_HUB_AGT_ADR_LPC \
+ ((NETXEN_HW_H6_CH_HUB_ADR << 7) | NETXEN_HW_LPC_CRB_AGT_ADR)
+
+#define NETXEN_SRE_MISC (NETXEN_CRB_SRE + 0x0002c)
+#define NETXEN_SRE_INT_STATUS (NETXEN_CRB_SRE + 0x00034)
+#define NETXEN_SRE_PBI_ACTIVE_STATUS (NETXEN_CRB_SRE + 0x01014)
+#define NETXEN_SRE_L1RE_CTL (NETXEN_CRB_SRE + 0x03000)
+#define NETXEN_SRE_L2RE_CTL (NETXEN_CRB_SRE + 0x05000)
+#define NETXEN_SRE_BUF_CTL (NETXEN_CRB_SRE + 0x01000)
+
+#define NETXEN_DMA_BASE(U) (NETXEN_CRB_PCIX_MD + 0x20000 + ((U)<<16))
+#define NETXEN_DMA_COMMAND(U) (NETXEN_DMA_BASE(U) + 0x00008)
+
+#define NETXEN_I2Q_CLR_PCI_HI (NETXEN_CRB_I2Q + 0x00034)
+
+#define PEG_NETWORK_BASE(N) (NETXEN_CRB_PEG_NET_0 + (((N)&3) << 20))
+#define CRB_REG_EX_PC 0x3c
+
+#define ROMUSB_GLB (NETXEN_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (NETXEN_CRB_ROMUSB + 0x10000)
+
+#define NETXEN_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define NETXEN_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define NETXEN_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define NETXEN_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define NETXEN_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define NETXEN_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define NETXEN_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define NETXEN_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define NETXEN_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define NETXEN_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define NETXEN_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define NETXEN_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+* Instructions
+*/
+#define M25P_INSTR_WREN 0x06
+#define M25P_INSTR_WRDI 0x04
+#define M25P_INSTR_RDID 0x9f
+#define M25P_INSTR_RDSR 0x05
+#define M25P_INSTR_WRSR 0x01
+#define M25P_INSTR_READ 0x03
+#define M25P_INSTR_FAST_READ 0x0b
+#define M25P_INSTR_PP 0x02
+#define M25P_INSTR_SE 0xd8
+#define M25P_INSTR_BE 0xc7
+#define M25P_INSTR_DP 0xb9
+#define M25P_INSTR_RES 0xab
+
+/* all are 1MB windows */
+
+#define NETXEN_PCI_CRB_WINDOWSIZE 0x00100000
+#define NETXEN_PCI_CRB_WINDOW(A) \
+ (NETXEN_PCI_CRBSPACE + (A)*NETXEN_PCI_CRB_WINDOWSIZE)
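+
+/*
+ * Worked example: with NETXEN_PCI_CRBSPACE = 0x06000000 (defined below)
+ * and NETXEN_HW_PX_MAP_CRB_NIU = 6, NETXEN_CRB_NIU resolves to
+ * 0x06000000 + 6 * 0x00100000 = 0x06600000.
+ */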
+
+#define NETXEN_CRB_NIU NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_NIU)
+#define NETXEN_CRB_SRE NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SRE)
+#define NETXEN_CRB_ROMUSB \
+ NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB)
+#define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q)
+#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0)
+#define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB)
+#define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64)
+
+#define NETXEN_CRB_PCIX_HOST NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH)
+#define NETXEN_CRB_PCIX_HOST2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PH2)
+#define NETXEN_CRB_PEG_NET_0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN0)
+#define NETXEN_CRB_PEG_NET_1 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN1)
+#define NETXEN_CRB_PEG_NET_2 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN2)
+#define NETXEN_CRB_PEG_NET_3 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGN3)
+#define NETXEN_CRB_PEG_NET_4 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SQS2)
+#define NETXEN_CRB_PEG_NET_D NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGND)
+#define NETXEN_CRB_PEG_NET_I NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PGNI)
+#define NETXEN_CRB_DDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_MN)
+#define NETXEN_CRB_QDR_NET NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SN)
+
+#define NETXEN_CRB_PCIX_MD NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_PS)
+#define NETXEN_CRB_PCIE NETXEN_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (NETXEN_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (NETXEN_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (NETXEN_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define NETXEN_PCI_MAPSIZE 128
+#define NETXEN_PCI_DDR_NET (0x00000000UL)
+#define NETXEN_PCI_QDR_NET (0x04000000UL)
+#define NETXEN_PCI_DIRECT_CRB (0x04400000UL)
+#define NETXEN_PCI_CAMQM (0x04800000UL)
+#define NETXEN_PCI_CAMQM_MAX (0x04ffffffUL)
+#define NETXEN_PCI_OCM0 (0x05000000UL)
+#define NETXEN_PCI_OCM0_MAX (0x050fffffUL)
+#define NETXEN_PCI_OCM1 (0x05100000UL)
+#define NETXEN_PCI_OCM1_MAX (0x051fffffUL)
+#define NETXEN_PCI_CRBSPACE (0x06000000UL)
+#define NETXEN_PCI_128MB_SIZE (0x08000000UL)
+#define NETXEN_PCI_32MB_SIZE (0x02000000UL)
+#define NETXEN_PCI_2MB_SIZE (0x00200000UL)
+
+#define NETXEN_PCI_MN_2M (0)
+#define NETXEN_PCI_MS_2M (0x80000)
+#define NETXEN_PCI_OCM0_2M (0x000c0000UL)
+#define NETXEN_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define NETXEN_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define NETXEN_CRB_CAM NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_CAM)
+
+#define NETXEN_ADDR_DDR_NET (0x0000000000000000ULL)
+#define NETXEN_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define NETXEN_ADDR_OCM0 (0x0000000200000000ULL)
+#define NETXEN_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define NETXEN_ADDR_OCM1 (0x0000000200400000ULL)
+#define NETXEN_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define NETXEN_ADDR_QDR_NET (0x0000000300000000ULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P2 (0x00000003003fffffULL)
+#define NETXEN_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define NETXEN_MIU_CONTROL (0x000)
+#define NETXEN_MIU_MN_CONTROL (NETXEN_CRB_DDR_NET+NETXEN_MIU_CONTROL)
+
+ /* 200ms delay in each loop */
+#define NETXEN_NIU_PHY_WAITLEN 200000
+ /* 10 seconds before we give up */
+#define NETXEN_NIU_PHY_WAITMAX 50
+#define NETXEN_NIU_MAX_GBE_PORTS 4
+#define NETXEN_NIU_MAX_XG_PORTS 2
+
+#define NETXEN_NIU_MODE (NETXEN_CRB_NIU + 0x00000)
+
+#define NETXEN_NIU_XG_SINGLE_TERM (NETXEN_CRB_NIU + 0x00004)
+#define NETXEN_NIU_XG_DRIVE_HI (NETXEN_CRB_NIU + 0x00008)
+#define NETXEN_NIU_XG_DRIVE_LO (NETXEN_CRB_NIU + 0x0000c)
+#define NETXEN_NIU_XG_DTX (NETXEN_CRB_NIU + 0x00010)
+#define NETXEN_NIU_XG_DEQ (NETXEN_CRB_NIU + 0x00014)
+#define NETXEN_NIU_XG_WORD_ALIGN (NETXEN_CRB_NIU + 0x00018)
+#define NETXEN_NIU_XG_RESET (NETXEN_CRB_NIU + 0x0001c)
+#define NETXEN_NIU_XG_POWER_DOWN (NETXEN_CRB_NIU + 0x00020)
+#define NETXEN_NIU_XG_RESET_PLL (NETXEN_CRB_NIU + 0x00024)
+#define NETXEN_NIU_XG_SERDES_LOOPBACK (NETXEN_CRB_NIU + 0x00028)
+#define NETXEN_NIU_XG_DO_BYTE_ALIGN (NETXEN_CRB_NIU + 0x0002c)
+#define NETXEN_NIU_XG_TX_ENABLE (NETXEN_CRB_NIU + 0x00030)
+#define NETXEN_NIU_XG_RX_ENABLE (NETXEN_CRB_NIU + 0x00034)
+#define NETXEN_NIU_XG_STATUS (NETXEN_CRB_NIU + 0x00038)
+#define NETXEN_NIU_XG_PAUSE_THRESHOLD (NETXEN_CRB_NIU + 0x0003c)
+#define NETXEN_NIU_INT_MASK (NETXEN_CRB_NIU + 0x00040)
+#define NETXEN_NIU_ACTIVE_INT (NETXEN_CRB_NIU + 0x00044)
+#define NETXEN_NIU_MASKABLE_INT (NETXEN_CRB_NIU + 0x00048)
+
+#define NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER (NETXEN_CRB_NIU + 0x0004c)
+
+#define NETXEN_NIU_GB_SERDES_RESET (NETXEN_CRB_NIU + 0x00050)
+#define NETXEN_NIU_GB0_GMII_MODE (NETXEN_CRB_NIU + 0x00054)
+#define NETXEN_NIU_GB0_MII_MODE (NETXEN_CRB_NIU + 0x00058)
+#define NETXEN_NIU_GB1_GMII_MODE (NETXEN_CRB_NIU + 0x0005c)
+#define NETXEN_NIU_GB1_MII_MODE (NETXEN_CRB_NIU + 0x00060)
+#define NETXEN_NIU_GB2_GMII_MODE (NETXEN_CRB_NIU + 0x00064)
+#define NETXEN_NIU_GB2_MII_MODE (NETXEN_CRB_NIU + 0x00068)
+#define NETXEN_NIU_GB3_GMII_MODE (NETXEN_CRB_NIU + 0x0006c)
+#define NETXEN_NIU_GB3_MII_MODE (NETXEN_CRB_NIU + 0x00070)
+#define NETXEN_NIU_REMOTE_LOOPBACK (NETXEN_CRB_NIU + 0x00074)
+#define NETXEN_NIU_GB0_HALF_DUPLEX (NETXEN_CRB_NIU + 0x00078)
+#define NETXEN_NIU_GB1_HALF_DUPLEX (NETXEN_CRB_NIU + 0x0007c)
+#define NETXEN_NIU_RESET_SYS_FIFOS (NETXEN_CRB_NIU + 0x00088)
+#define NETXEN_NIU_GB_CRC_DROP (NETXEN_CRB_NIU + 0x0008c)
+#define NETXEN_NIU_GB_DROP_WRONGADDR (NETXEN_CRB_NIU + 0x00090)
+#define NETXEN_NIU_TEST_MUX_CTL (NETXEN_CRB_NIU + 0x00094)
+#define NETXEN_NIU_XG_PAUSE_CTL (NETXEN_CRB_NIU + 0x00098)
+#define NETXEN_NIU_XG_PAUSE_LEVEL (NETXEN_CRB_NIU + 0x000dc)
+#define NETXEN_NIU_FRAME_COUNT_SELECT (NETXEN_CRB_NIU + 0x000ac)
+#define NETXEN_NIU_FRAME_COUNT (NETXEN_CRB_NIU + 0x000b0)
+#define NETXEN_NIU_XG_SEL (NETXEN_CRB_NIU + 0x00128)
+#define NETXEN_NIU_GB_PAUSE_CTL (NETXEN_CRB_NIU + 0x0030c)
+
+#define NETXEN_NIU_FULL_LEVEL_XG (NETXEN_CRB_NIU + 0x00450)
+
+#define NETXEN_NIU_XG1_RESET (NETXEN_CRB_NIU + 0x0011c)
+#define NETXEN_NIU_XG1_POWER_DOWN (NETXEN_CRB_NIU + 0x00120)
+#define NETXEN_NIU_XG1_RESET_PLL (NETXEN_CRB_NIU + 0x00124)
+
+#define NETXEN_MAC_ADDR_CNTL_REG (NETXEN_CRB_NIU + 0x1000)
+
+#define NETXEN_MULTICAST_ADDR_HI_0 (NETXEN_CRB_NIU + 0x1010)
+#define NETXEN_MULTICAST_ADDR_HI_1 (NETXEN_CRB_NIU + 0x1014)
+#define NETXEN_MULTICAST_ADDR_HI_2 (NETXEN_CRB_NIU + 0x1018)
+#define NETXEN_MULTICAST_ADDR_HI_3 (NETXEN_CRB_NIU + 0x101c)
+
+#define NETXEN_UNICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1080)
+#define NETXEN_MULTICAST_ADDR_BASE (NETXEN_CRB_NIU + 0x1100)
+
+#define NETXEN_NIU_GB_MAC_CONFIG_0(I) \
+ (NETXEN_CRB_NIU + 0x30000 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_CONFIG_1(I) \
+ (NETXEN_CRB_NIU + 0x30004 + (I)*0x10000)
+#define NETXEN_NIU_GB_MAC_IPG_IFG(I) \
+ (NETXEN_CRB_NIU + 0x30008 + (I)*0x10000)
+#define NETXEN_NIU_GB_HALF_DUPLEX_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3000c + (I)*0x10000)
+#define NETXEN_NIU_GB_MAX_FRAME_SIZE(I) \
+ (NETXEN_CRB_NIU + 0x30010 + (I)*0x10000)
+#define NETXEN_NIU_GB_TEST_REG(I) \
+ (NETXEN_CRB_NIU + 0x3001c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CONFIG(I) \
+ (NETXEN_CRB_NIU + 0x30020 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_COMMAND(I) \
+ (NETXEN_CRB_NIU + 0x30024 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_ADDR(I) \
+ (NETXEN_CRB_NIU + 0x30028 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x3002c + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x30030 + (I)*0x10000)
+#define NETXEN_NIU_GB_MII_MGMT_INDICATE(I) \
+ (NETXEN_CRB_NIU + 0x30034 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_CTRL(I) \
+ (NETXEN_CRB_NIU + 0x30038 + (I)*0x10000)
+#define NETXEN_NIU_GB_INTERFACE_STATUS(I) \
+ (NETXEN_CRB_NIU + 0x3003c + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_0(I) \
+ (NETXEN_CRB_NIU + 0x30040 + (I)*0x10000)
+#define NETXEN_NIU_GB_STATION_ADDR_1(I) \
+ (NETXEN_CRB_NIU + 0x30044 + (I)*0x10000)
+
+#define NETXEN_NIU_XGE_CONFIG_0 (NETXEN_CRB_NIU + 0x70000)
+#define NETXEN_NIU_XGE_CONFIG_1 (NETXEN_CRB_NIU + 0x70004)
+#define NETXEN_NIU_XGE_IPG (NETXEN_CRB_NIU + 0x70008)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x7000c)
+#define NETXEN_NIU_XGE_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x70010)
+#define NETXEN_NIU_XGE_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x70014)
+#define NETXEN_NIU_XGE_STATUS (NETXEN_CRB_NIU + 0x70018)
+#define NETXEN_NIU_XGE_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x7001c)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x70020)
+#define NETXEN_NIU_XGE_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x70024)
+#define NETXEN_NIU_XGE_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x70028)
+#define NETXEN_NIU_XGE_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x7002c)
+#define NETXEN_NIU_XGE_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x70030)
+#define NETXEN_NIU_XGE_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x70034)
+#define NETXEN_NIU_XGE_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x70038)
+#define NETXEN_NIU_XGE_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x7003c)
+#define NETXEN_NIU_XGE_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x70040)
+#define NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70044)
+#define NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x70048)
+#define NETXEN_NIU_XGE_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x7004c)
+#define NETXEN_NIU_XGE_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x70050)
+#define NETXEN_NIU_XGE_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x70054)
+#define NETXEN_NIU_XGE_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x70058)
+#define NETXEN_NIU_XG1_CONFIG_0 (NETXEN_CRB_NIU + 0x80000)
+#define NETXEN_NIU_XG1_CONFIG_1 (NETXEN_CRB_NIU + 0x80004)
+#define NETXEN_NIU_XG1_IPG (NETXEN_CRB_NIU + 0x80008)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_HI (NETXEN_CRB_NIU + 0x8000c)
+#define NETXEN_NIU_XG1_STATION_ADDR_0_1 (NETXEN_CRB_NIU + 0x80010)
+#define NETXEN_NIU_XG1_STATION_ADDR_1_LO (NETXEN_CRB_NIU + 0x80014)
+#define NETXEN_NIU_XG1_STATUS (NETXEN_CRB_NIU + 0x80018)
+#define NETXEN_NIU_XG1_MAX_FRAME_SIZE (NETXEN_CRB_NIU + 0x8001c)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_VALUE (NETXEN_CRB_NIU + 0x80020)
+#define NETXEN_NIU_XG1_TX_BYTE_CNT (NETXEN_CRB_NIU + 0x80024)
+#define NETXEN_NIU_XG1_TX_FRAME_CNT (NETXEN_CRB_NIU + 0x80028)
+#define NETXEN_NIU_XG1_RX_BYTE_CNT (NETXEN_CRB_NIU + 0x8002c)
+#define NETXEN_NIU_XG1_RX_FRAME_CNT (NETXEN_CRB_NIU + 0x80030)
+#define NETXEN_NIU_XG1_AGGR_ERROR_CNT (NETXEN_CRB_NIU + 0x80034)
+#define NETXEN_NIU_XG1_MULTICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x80038)
+#define NETXEN_NIU_XG1_UNICAST_FRAME_CNT (NETXEN_CRB_NIU + 0x8003c)
+#define NETXEN_NIU_XG1_CRC_ERROR_CNT (NETXEN_CRB_NIU + 0x80040)
+#define NETXEN_NIU_XG1_OVERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80044)
+#define NETXEN_NIU_XG1_UNDERSIZE_FRAME_ERR (NETXEN_CRB_NIU + 0x80048)
+#define NETXEN_NIU_XG1_LOCAL_ERROR_CNT (NETXEN_CRB_NIU + 0x8004c)
+#define NETXEN_NIU_XG1_REMOTE_ERROR_CNT (NETXEN_CRB_NIU + 0x80050)
+#define NETXEN_NIU_XG1_CONTROL_CHAR_CNT (NETXEN_CRB_NIU + 0x80054)
+#define NETXEN_NIU_XG1_PAUSE_FRAME_CNT (NETXEN_CRB_NIU + 0x80058)
+
+/* P3 802.3ap */
+#define NETXEN_NIU_AP_MAC_CONFIG_0(I) (NETXEN_CRB_NIU+0xa0000+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_CONFIG_1(I) (NETXEN_CRB_NIU+0xa0004+(I)*0x10000)
+#define NETXEN_NIU_AP_MAC_IPG_IFG(I) (NETXEN_CRB_NIU+0xa0008+(I)*0x10000)
+#define NETXEN_NIU_AP_HALF_DUPLEX_CTRL(I) (NETXEN_CRB_NIU+0xa000c+(I)*0x10000)
+#define NETXEN_NIU_AP_MAX_FRAME_SIZE(I) (NETXEN_CRB_NIU+0xa0010+(I)*0x10000)
+#define NETXEN_NIU_AP_TEST_REG(I) (NETXEN_CRB_NIU+0xa001c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CONFIG(I) (NETXEN_CRB_NIU+0xa0020+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_COMMAND(I) (NETXEN_CRB_NIU+0xa0024+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_ADDR(I) (NETXEN_CRB_NIU+0xa0028+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_CTRL(I) (NETXEN_CRB_NIU+0xa002c+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_STATUS(I) (NETXEN_CRB_NIU+0xa0030+(I)*0x10000)
+#define NETXEN_NIU_AP_MII_MGMT_INDICATE(I) (NETXEN_CRB_NIU+0xa0034+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_CTRL(I) (NETXEN_CRB_NIU+0xa0038+(I)*0x10000)
+#define NETXEN_NIU_AP_INTERFACE_STATUS(I) (NETXEN_CRB_NIU+0xa003c+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
+#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
+
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn,val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
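+
+/*
+ * Each PCI function owns a 4-bit link-state nibble and an 8-bit speed
+ * field (four functions per CRB_PF_LINK_SPEED_* register, defined below).
+ * Worked example for function 5: XG_LINK_STATE_P3(5, val) = (val >> 20) & 0xf,
+ * and P3_LINK_SPEED_VAL(5, reg) = (reg >> 8) & 0xff with reg read from
+ * CRB_PF_LINK_SPEED_1 + 4; the speed byte is presumably in units of
+ * P3_LINK_SPEED_MHZ.
+ */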
+
+#define NETXEN_CAM_RAM_BASE (NETXEN_CRB_CAM + 0x02000)
+#define NETXEN_CAM_RAM(reg) (NETXEN_CAM_RAM_BASE + (reg))
+#define NETXEN_FW_VERSION_MAJOR (NETXEN_CAM_RAM(0x150))
+#define NETXEN_FW_VERSION_MINOR (NETXEN_CAM_RAM(0x154))
+#define NETXEN_FW_VERSION_SUB (NETXEN_CAM_RAM(0x158))
+#define NETXEN_ROM_LOCK_ID (NETXEN_CAM_RAM(0x100))
+#define NETXEN_PHY_LOCK_ID (NETXEN_CAM_RAM(0x120))
+#define NETXEN_CRB_WIN_LOCK_ID (NETXEN_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (NETXEN_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (NETXEN_CAM_RAM(0x700))
+#define NETXEN_NIC_REG(X) (NIC_CRB_BASE+(X))
+#define NETXEN_NIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+#define NETXEN_INTR_MODE_REG NETXEN_NIC_REG(0x44)
+#define NETXEN_MSI_MODE 0x1
+#define NETXEN_INTX_MODE 0x2
+
+#define NX_CDRP_CRB_OFFSET (NETXEN_NIC_REG(0x18))
+#define NX_ARG1_CRB_OFFSET (NETXEN_NIC_REG(0x1c))
+#define NX_ARG2_CRB_OFFSET (NETXEN_NIC_REG(0x20))
+#define NX_ARG3_CRB_OFFSET (NETXEN_NIC_REG(0x24))
+#define NX_SIGN_CRB_OFFSET (NETXEN_NIC_REG(0x28))
+
+#define CRB_HOST_DUMMY_BUF_ADDR_HI (NETXEN_NIC_REG(0x3c))
+#define CRB_HOST_DUMMY_BUF_ADDR_LO (NETXEN_NIC_REG(0x40))
+
+#define CRB_CMDPEG_STATE (NETXEN_NIC_REG(0x50))
+#define CRB_RCVPEG_STATE (NETXEN_NIC_REG(0x13c))
+
+#define CRB_XG_STATE (NETXEN_NIC_REG(0x94))
+#define CRB_XG_STATE_P3 (NETXEN_NIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (NETXEN_NIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (NETXEN_NIC_REG(0xec))
+
+#define CRB_MPORT_MODE (NETXEN_NIC_REG(0xc4))
+#define CRB_DMA_SHIFT (NETXEN_NIC_REG(0xcc))
+#define CRB_INT_VECTOR (NETXEN_NIC_REG(0xd4))
+
+#define CRB_CMD_PRODUCER_OFFSET (NETXEN_NIC_REG(0x08))
+#define CRB_CMD_CONSUMER_OFFSET (NETXEN_NIC_REG(0x0c))
+#define CRB_CMD_PRODUCER_OFFSET_1 (NETXEN_NIC_REG(0x1ac))
+#define CRB_CMD_CONSUMER_OFFSET_1 (NETXEN_NIC_REG(0x1b0))
+#define CRB_CMD_PRODUCER_OFFSET_2 (NETXEN_NIC_REG(0x1b8))
+#define CRB_CMD_CONSUMER_OFFSET_2 (NETXEN_NIC_REG(0x1bc))
+#define CRB_CMD_PRODUCER_OFFSET_3 (NETXEN_NIC_REG(0x1d0))
+#define CRB_CMD_CONSUMER_OFFSET_3 (NETXEN_NIC_REG(0x1d4))
+#define CRB_TEMP_STATE (NETXEN_NIC_REG(0x1b4))
+
+#define CRB_V2P_0 (NETXEN_NIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (NETXEN_NIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (NETXEN_NIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (NETXEN_NIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (NETXEN_NIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (NETXEN_NIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (NETXEN_CAM_RAM(0x128))
+#define CRB_FW_CAPABILITIES_2 (NETXEN_CAM_RAM(0x12c))
+#define CRB_MAC_BLOCK_START (NETXEN_CAM_RAM(0x1c0))
+
+/*
+ * Capabilities register; can be used to selectively enable/disable features
+ * for backward compatibility.
+ */
+#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
+#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST NETXEN_NIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo are used to pass the address of the HostPhantomIndex,
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define nx_get_temp_val(x) ((x) >> 16)
+#define nx_get_temp_state(x) ((x) & 0xffff)
+#define nx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ NX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ NX_TEMP_WARN, /* Sound alert, temperature getting high */
+ NX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
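+
+/*
+ * Worked example: nx_encode_temp(85, NX_TEMP_WARN) packs to
+ * (85 << 16) | 2 = 0x00550002, and nx_get_temp_val()/nx_get_temp_state()
+ * recover 85 and NX_TEMP_WARN from that word.
+ */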
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_CRB_WINDOW (0x10210)
+#define PCIX_CRB_WINDOW_F0 (0x10210)
+#define PCIX_CRB_WINDOW_F1 (0x10230)
+#define PCIX_CRB_WINDOW_F2 (0x10250)
+#define PCIX_CRB_WINDOW_F3 (0x10270)
+#define PCIX_CRB_WINDOW_F4 (0x102ac)
+#define PCIX_CRB_WINDOW_F5 (0x102bc)
+#define PCIX_CRB_WINDOW_F6 (0x102cc)
+#define PCIX_CRB_WINDOW_F7 (0x102dc)
+#define PCIE_CRB_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_CRB_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_CRB_WINDOW_F4 + (0x10 * ((func)-4))))
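+
+/*
+ * Worked example: PCIE_CRB_WINDOW_REG(2) = 0x10210 + 0x40 = 0x10250
+ * (PCIX_CRB_WINDOW_F2), while PCIE_CRB_WINDOW_REG(5) = 0x102ac + 0x10 =
+ * 0x102bc (PCIX_CRB_WINDOW_F5); the per-function stride drops from 0x20
+ * to 0x10 at function 4.
+ */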
+
+#define PCIX_MN_WINDOW (0x10200)
+#define PCIX_MN_WINDOW_F0 (0x10200)
+#define PCIX_MN_WINDOW_F1 (0x10220)
+#define PCIX_MN_WINDOW_F2 (0x10240)
+#define PCIX_MN_WINDOW_F3 (0x10260)
+#define PCIX_MN_WINDOW_F4 (0x102a0)
+#define PCIX_MN_WINDOW_F5 (0x102b0)
+#define PCIX_MN_WINDOW_F6 (0x102c0)
+#define PCIX_MN_WINDOW_F7 (0x102d0)
+#define PCIE_MN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_MN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_MN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_SN_WINDOW (0x10208)
+#define PCIX_SN_WINDOW_F0 (0x10208)
+#define PCIX_SN_WINDOW_F1 (0x10228)
+#define PCIX_SN_WINDOW_F2 (0x10248)
+#define PCIX_SN_WINDOW_F3 (0x10268)
+#define PCIX_SN_WINDOW_F4 (0x102a8)
+#define PCIX_SN_WINDOW_F5 (0x102b8)
+#define PCIX_SN_WINDOW_F6 (0x102c8)
+#define PCIX_SN_WINDOW_F7 (0x102d8)
+#define PCIE_SN_WINDOW_REG(func) (((func) < 4) ? \
+ (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
+ (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F0 (0x13000)
+#define PCIX_MSI_F1 (0x13004)
+#define PCIX_MSI_F2 (0x13008)
+#define PCIX_MSI_F3 (0x1300c)
+#define PCIX_MSI_F4 (0x13010)
+#define PCIX_MSI_F5 (0x13014)
+#define PCIX_MSI_F6 (0x13018)
+#define PCIX_MSI_F7 (0x1301c)
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define PCIX_PS_MEM_SPACE (0x90000)
+
+#define NETXEN_PCIX_PH_REG(reg) (NETXEN_CRB_PCIE + (reg))
+#define NETXEN_PCIX_PS_REG(reg) (NETXEN_CRB_PCIX_MD + (reg))
+
+#define NETXEN_PCIE_REG(reg) (NETXEN_CRB_PCIE + (reg))
+
+#define PCIE_MAX_DMA_XFER_SIZE (0x1404c)
+
+#define PCIE_DCR 0x00d8
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM1_LOCK (0x1c008)
+#define PCIE_SEM1_UNLOCK (0x1c00c)
+#define PCIE_SEM2_LOCK (0x1c010) /* Flash lock */
+#define PCIE_SEM2_UNLOCK (0x1c014) /* Flash unlock */
+#define PCIE_SEM3_LOCK (0x1c018) /* Phy lock */
+#define PCIE_SEM3_UNLOCK (0x1c01c) /* Phy unlock */
+#define PCIE_SEM4_LOCK (0x1c020)
+#define PCIE_SEM4_UNLOCK (0x1c024)
+#define PCIE_SEM5_LOCK (0x1c028) /* API lock */
+#define PCIE_SEM5_UNLOCK (0x1c02c) /* API unlock */
+#define PCIE_SEM6_LOCK (0x1c030) /* sw lock */
+#define PCIE_SEM6_UNLOCK (0x1c034) /* sw unlock */
+#define PCIE_SEM7_LOCK (0x1c038) /* crb win lock */
+#define PCIE_SEM7_UNLOCK (0x1c03c) /* crb win unlock */
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
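+
+/*
+ * Worked example: PCIE_SEM_LOCK(2) = 0x1c000 + 8 * 2 = 0x1c010, the flash
+ * lock register listed above, and PCIE_SEM_UNLOCK(2) = 0x1c014.
+ */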
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (NETXEN_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define NETXEN_PORT_MODE_NONE 0
+#define NETXEN_PORT_MODE_XG 1
+#define NETXEN_PORT_MODE_GB 2
+#define NETXEN_PORT_MODE_802_3_AP 3
+#define NETXEN_PORT_MODE_AUTO_NEG 4
+#define NETXEN_PORT_MODE_AUTO_NEG_1G 5
+#define NETXEN_PORT_MODE_AUTO_NEG_XG 6
+#define NETXEN_PORT_MODE_ADDR (NETXEN_CAM_RAM(0x24))
+#define NETXEN_WOL_PORT_MODE (NETXEN_CAM_RAM(0x198))
+
+#define NETXEN_WOL_CONFIG_NV (NETXEN_CAM_RAM(0x184))
+#define NETXEN_WOL_CONFIG (NETXEN_CAM_RAM(0x188))
+
+#define NX_PEG_TUNE_MN_PRESENT 0x1
+#define NX_PEG_TUNE_CAPABILITY (NETXEN_CAM_RAM(0x02c))
+
+#define NETXEN_DMA_WATCHDOG_CTRL (NETXEN_CAM_RAM(0x14))
+#define NETXEN_PEG_ALIVE_COUNTER (NETXEN_CAM_RAM(0xb0))
+#define NETXEN_PEG_HALT_STATUS1 (NETXEN_CAM_RAM(0xa8))
+#define NETXEN_PEG_HALT_STATUS2 (NETXEN_CAM_RAM(0xac))
+#define NX_CRB_DEV_REF_COUNT (NETXEN_CAM_RAM(0x138))
+#define NX_CRB_DEV_STATE (NETXEN_CAM_RAM(0x140))
+#define NETXEN_ULA_KEY (NETXEN_CAM_RAM(0x178))
+
+/* MiniDIMM related macros */
+#define NETXEN_DIMM_CAPABILITY (NETXEN_CAM_RAM(0x258))
+#define NETXEN_DIMM_PRESENT 0x1
+#define NETXEN_DIMM_MEMTYPE_DDR2_SDRAM 0x2
+#define NETXEN_DIMM_SIZE 0x4
+#define NETXEN_DIMM_MEMTYPE(VAL) ((VAL >> 3) & 0xf)
+#define NETXEN_DIMM_NUMROWS(VAL) ((VAL >> 7) & 0xf)
+#define NETXEN_DIMM_NUMCOLS(VAL) ((VAL >> 11) & 0xf)
+#define NETXEN_DIMM_NUMRANKS(VAL) ((VAL >> 15) & 0x3)
+#define NETXEN_DIMM_DATAWIDTH(VAL) ((VAL >> 18) & 0x3)
+#define NETXEN_DIMM_NUMBANKS(VAL) ((VAL >> 21) & 0xf)
+#define NETXEN_DIMM_TYPE(VAL) ((VAL >> 25) & 0x3f)
+#define NETXEN_DIMM_VALID_FLAG 0x80000000
+
+#define NETXEN_DIMM_MEM_DDR2_SDRAM 0x8
+
+#define NETXEN_DIMM_STD_MEM_SIZE 512
+
+#define NETXEN_DIMM_TYPE_RDIMM 0x1
+#define NETXEN_DIMM_TYPE_UDIMM 0x2
+#define NETXEN_DIMM_TYPE_SO_DIMM 0x4
+#define NETXEN_DIMM_TYPE_Micro_DIMM 0x8
+#define NETXEN_DIMM_TYPE_Mini_RDIMM 0x10
+#define NETXEN_DIMM_TYPE_Mini_UDIMM 0x20
+
+/* Device State */
+#define NX_DEV_COLD 1
+#define NX_DEV_INITALIZING 2
+#define NX_DEV_READY 3
+#define NX_DEV_NEED_RESET 4
+#define NX_DEV_NEED_QUISCENT 5
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
+
+#define NX_RCODE_DRIVER_INFO 0x20000000
+#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define NX_RCODE_FATAL_ERROR 0x80000000
+#define NX_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define NX_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+#define NX_FWERROR_PEGSTAT1(code) ((code >> 8) & 0x1fffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (NETXEN_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
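+
+/*
+ * ISR_LEGACY_INT_TRIGGERED() looks only at bits [9:8] of the interrupt
+ * state: a value of 0x200 indicates a pending legacy interrupt, whereas
+ * 0x100 or 0x300 does not.
+ */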
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct netxen_legacy_intr_set {
+ uint32_t int_vec_bit;
+ uint32_t tgt_status_reg;
+ uint32_t tgt_mask_reg;
+ uint32_t pci_int_reg;
+};
+
+#define NX_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
+
+#endif /* __NETXEN_NIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
new file mode 100644
index 000000000..6e12cd21a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -0,0 +1,2548 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/slab.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
+#define MS_WIN(addr) (addr & 0x0ffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
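+
+/*
+ * CRB_BLK() and CRB_SUBBLK() extract bits [25:20] and [19:16] of a CRB
+ * offset; CRB_HI() replaces the block number with the hub/agent id from
+ * crb_hub_agt[] (see below) while keeping the 64K sub-block offset in
+ * bits [19:16].
+ */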
+
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data);
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+ void __iomem *addr);
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base1 + (off) - SECOND_PAGE_GROUP_START)
+#define PCI_OFFSET_THIRD_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base2 + (off) - THIRD_PAGE_GROUP_START)
+
+static void __iomem *pci_base_offset(struct netxen_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, SECOND_PAGE_GROUP_START, SECOND_PAGE_GROUP_END))
+ return PCI_OFFSET_SECOND_RANGE(adapter, off);
+
+ if (ADDR_IN_RANGE(off, THIRD_PAGE_GROUP_START, THIRD_PAGE_GROUP_END))
+ return PCI_OFFSET_THIRD_RANGE(adapter, off);
+
+ return NULL;
+}
+
+static crb_128M_2M_block_map_t
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static unsigned crb_hub_agt[64] =
+{
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SRE,
+ NETXEN_HW_CRB_HUB_AGT_ADR_NIU,
+ NETXEN_HW_CRB_HUB_AGT_ADR_QMN,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SQN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGN3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGND,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGSI,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PS,
+ NETXEN_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX1,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX2,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX3,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX4,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX5,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX6,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX7,
+ NETXEN_HW_CRB_HUB_AGT_ADR_XDMA,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2Q,
+ NETXEN_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX8,
+ NETXEN_HW_CRB_HUB_AGT_ADR_RPMX9,
+ NETXEN_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_SMB,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ NETXEN_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define NETXEN_WINDOW_ONE 0x2000000 /* CRB Window: bit 25 of CRB address */
+
+#define NETXEN_PCIE_SEM_TIMEOUT 10000
+
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu);
+
+int
+netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ NXWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
+{
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
+
+static int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
+{
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1+(0x10000*port), 0x1447);
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0+(0x10000*port), 0x5);
+ }
+
+ return 0;
+}
+
+/* Disable an XG interface */
+static int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
+{
+ __u32 mac_cfg;
+ u32 port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ return 0;
+
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_cfg = 0;
+ if (NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg))
+ return -EIO;
+ return 0;
+}
+
+#define NETXEN_UNICAST_ADDR(port, index) \
+ (NETXEN_UNICAST_ADDR_BASE+(port*32)+(index*8))
+#define NETXEN_MCAST_ADDR(port, index) \
+ (NETXEN_MULTICAST_ADDR_BASE+(port*0x80)+(index*8))
+#define MAC_HI(addr) \
+ ((addr[2] << 16) | (addr[1] << 8) | (addr[0]))
+#define MAC_LO(addr) \
+ ((addr[5] << 16) | (addr[4] << 8) | (addr[3]))
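+
+/*
+ * Worked example with a hypothetical address 00:0e:1e:aa:bb:cc:
+ * MAC_HI() packs the first three octets into 0x001e0e00 and MAC_LO()
+ * packs the last three into 0x00ccbbaa, i.e. the earlier octet of each
+ * triplet ends up in the low byte of the register value.
+ */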
+
+static int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ u32 mac_cfg;
+ u32 cnt = 0;
+ __u32 reg = 0x0200;
+ u32 port = adapter->physical_port;
+ u16 board_type = adapter->ahw.board_type;
+
+ if (port >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
+ mac_cfg &= ~0x4;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+ if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
+ (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
+ reg = (0x20 << port);
+
+ NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
+
+ mdelay(10);
+
+ while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
+ mdelay(10);
+
+ if (cnt < 20) {
+
+ reg = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
+
+ if (mode == NETXEN_NIU_PROMISC_MODE)
+ reg = (reg | 0x2000UL);
+ else
+ reg = (reg & ~0x2000UL);
+
+ if (mode == NETXEN_NIU_ALLMULTI_MODE)
+ reg = (reg | 0x1000UL);
+ else
+ reg = (reg & ~0x1000UL);
+
+ NXWR32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+ }
+
+ mac_cfg |= 0x4;
+ NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
+
+ return 0;
+}
+
+static int netxen_p2_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ u32 mac_hi, mac_lo;
+ u32 reg_hi, reg_lo;
+
+ u8 phy = adapter->physical_port;
+
+ if (phy >= NETXEN_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ mac_lo = ((u32)addr[0] << 16) | ((u32)addr[1] << 24);
+ mac_hi = addr[2] | ((u32)addr[3] << 8) |
+ ((u32)addr[4] << 16) | ((u32)addr[5] << 24);
+
+ reg_lo = NETXEN_NIU_XGE_STATION_ADDR_0_1 + (0x10000 * phy);
+ reg_hi = NETXEN_NIU_XGE_STATION_ADDR_0_HI + (0x10000 * phy);
+
+ /* write twice to flush */
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+ if (NXWR32(adapter, reg_lo, mac_lo) || NXWR32(adapter, reg_hi, mac_hi))
+ return -EIO;
+
+ return 0;
+}
+
+static int
+netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->mac_addr;
+
+ if (adapter->mc_enabled)
+ return 0;
+
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+ val |= (1UL << (28+port));
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ /* add broadcast addr to filter */
+ val = 0xffffff;
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ /* add station addr to filter */
+ val = MAC_HI(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), val);
+ val = MAC_LO(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, val);
+
+ adapter->mc_enabled = 1;
+ return 0;
+}
+
+static int
+netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ u16 port = adapter->physical_port;
+ u8 *addr = adapter->mac_addr;
+
+ if (!adapter->mc_enabled)
+ return 0;
+
+ val = NXRD32(adapter, NETXEN_MAC_ADDR_CNTL_REG);
+ val &= ~(1UL << (28+port));
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ val = MAC_HI(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0), val);
+ val = MAC_LO(addr);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 0)+4, val);
+
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1), 0);
+ NXWR32(adapter, NETXEN_UNICAST_ADDR(port, 1)+4, 0);
+
+ adapter->mc_enabled = 0;
+ return 0;
+}
+
+static int
+netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
+ int index, u8 *addr)
+{
+ u32 hi = 0, lo = 0;
+ u16 port = adapter->physical_port;
+
+ lo = MAC_LO(addr);
+ hi = MAC_HI(addr);
+
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index), hi);
+ NXWR32(adapter, NETXEN_MCAST_ADDR(port, index)+4, lo);
+
+ return 0;
+}
+
+static void netxen_p2_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ u8 null_addr[ETH_ALEN];
+ int i;
+
+ eth_zero_addr(null_addr);
+
+ if (netdev->flags & IFF_PROMISC) {
+
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_PROMISC_MODE);
+
+ /* Full promiscuous mode */
+ netxen_nic_disable_mcast_filter(adapter);
+
+ return;
+ }
+
+ if (netdev_mc_empty(netdev)) {
+ adapter->set_promisc(adapter,
+ NETXEN_NIU_NON_PROMISC_MODE);
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
+ netxen_nic_disable_mcast_filter(adapter);
+ return;
+ }
+
+ netxen_nic_enable_mcast_filter(adapter);
+
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
+
+ /* Clear out remaining addresses */
+ while (i < adapter->max_mc_count)
+ netxen_nic_set_mcast_addr(adapter, i++, null_addr);
+}
+
+static int
+netxen_send_cmd_descs(struct netxen_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer;
+ struct netxen_cmd_buffer *pbuf;
+ struct nx_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+
+ if (nr_desc >= netxen_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ smp_mb();
+ if (netxen_tx_avail(tx_ring) > nr_desc) {
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+ }
+
+ do {
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+static int
+nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
+{
+ nx_nic_req_t req;
+ nx_mac_req_t *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_NIC_REQUEST << 23);
+
+ word = NX_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (nx_mac_req_t *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
+
+ return netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
+ const u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ /* look up if already exists */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, nx_mac_list_t, list);
+
+ if (ether_addr_equal(addr, cur->mac_addr)) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
+ }
+
+ cur = kzalloc(sizeof(nx_mac_list_t), GFP_ATOMIC);
+ if (cur == NULL)
+ return -ENOMEM;
+
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_ADD);
+}
+
+static void netxen_p3_nic_set_multi(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ nx_mac_list_t *cur;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ list_splice_tail_init(&adapter->mac_list, &del_list);
+
+ nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+ nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
+ }
+
+send_fw_cmd:
+ adapter->set_promisc(adapter, mode);
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int netxen_p3_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
+{
+ nx_nic_req_t req;
+ u64 word;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return netxen_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void netxen_p3_free_mac_list(struct netxen_adapter *adapter)
+{
+ nx_mac_list_t *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, nx_mac_list_t, list);
+ nx_p3_sre_macaddr_change(adapter,
+ cur->mac_addr, NETXEN_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
+{
+ /* assuming caller has already copied new addr to netdev */
+ netxen_p3_nic_set_multi(adapter->netdev);
+ return 0;
+}
+
+#define NETXEN_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameter set by ethtool to the card.
+ */
+int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
+{
+ nx_nic_req_t req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "interrupt coalescing parameters\n");
+ }
+
+ return rv;
+}
+
+int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv = 0;
+
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "configure hw lro request\n");
+ }
+
+ return rv;
+}
+
+int netxen_config_bridged_mode(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv = 0;
+
+ if (!!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED) == enable)
+ return rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "ERROR. Could not send "
+ "configure bridge mode request\n");
+ }
+
+ adapter->flags ^= NETXEN_NIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int netxen_config_rss(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
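+ /*
+ * Example: with enable set this evaluates to 0x00070000000001f0,
+ * i.e. TCP hashing for both IPv4 and IPv6, RSS enabled, and an
+ * indirection table mask of 0x7.
+ */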
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < ARRAY_SIZE(key); i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure RSS\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_config_ipaddr(struct netxen_adapter *adapter, __be32 ip, int cmd)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ memcpy(&req.words[1], &ip, sizeof(u32));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not notify %s IP 0x%x request\n",
+ adapter->netdev->name,
+ (cmd == NX_IP_UP) ? "Add" : "Remove", ip);
+ }
+ return rv;
+}
+
+int netxen_linkevent_request(struct netxen_adapter *adapter, int enable)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not configure link notification\n",
+ adapter->netdev->name);
+ }
+
+ return rv;
+}
+
+int netxen_send_lro_cleanup(struct netxen_adapter *adapter)
+{
+ nx_nic_req_t req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__NX_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(nx_nic_req_t));
+ req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
+
+ word = NX_NIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)NX_NIC_LRO_REQUEST_CLEANUP << 56) ;
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0) {
+ printk(KERN_ERR "%s: could not cleanup lro flows\n",
+ adapter->netdev->name);
+ }
+ return rv;
+}
+
+/*
+ * netxen_nic_change_mtu - Change the Maximum Transmission Unit
+ * @returns 0 on success, negative on failure
+ */
+
+#define MTU_FUDGE_FACTOR 100
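+/*
+ * Headroom added on top of the requested MTU before it is programmed
+ * into the MAC maximum-frame-size register in netxen_nic_set_mtu_xgb()
+ * (presumably to cover L2 headers, VLAN tag and FCS).
+ */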
+
+int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (adapter->set_mtu)
+ rc = adapter->set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
+ int size, __le32 * buf)
+{
+ int i, v, addr;
+ __le32 *ptr32;
+ int ret;
+
+ addr = base;
+ ptr32 = buf;
+ for (i = 0; i < size / sizeof(u32); i++) {
+ ret = netxen_rom_fast_read(adapter, addr, &v);
+ if (ret)
+ return ret;
+
+ *ptr32 = cpu_to_le32(v);
+ ptr32++;
+ addr += sizeof(u32);
+ }
+ if ((char *)buf + size > (char *)ptr32) {
+ __le32 local;
+ ret = netxen_rom_fast_read(adapter, addr, &v);
+ if (ret)
+ return ret;
+ local = cpu_to_le32(v);
+ memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
+ }
+
+ return 0;
+}
+
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+ __le32 *pmac = (__le32 *) mac;
+ u32 offset;
+
+ offset = NX_FW_MAC_ADDR_OFFSET + (adapter->portnum * sizeof(u64));
+
+ if (netxen_get_flash_block(adapter, offset, sizeof(u64), pmac) == -1)
+ return -1;
+
+ if (*mac == ~0ULL) {
+
+ offset = NX_OLD_MAC_ADDR_OFFSET +
+ (adapter->portnum * sizeof(u64));
+
+ if (netxen_get_flash_block(adapter,
+ offset, sizeof(u64), pmac) == -1)
+ return -1;
+
+ if (*mac == ~0ULL)
+ return -1;
+ }
+ return 0;
+}
+
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
+{
+ uint32_t crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
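+ /*
+ * The CRB MAC block packs the two 48-bit addresses of an even/odd
+ * PCI function pair into three consecutive 32-bit words; the odd
+ * function's address starts 16 bits into the second word, hence
+ * the different shifts below.
+ */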
+
+ mac_lo = NXRD32(adapter, crbaddr);
+ mac_hi = NXRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
+
+/*
+ * Changes the CRB window to the specified window.
+ */
+static void
+netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
+ u32 window)
+{
+ void __iomem *offset;
+ int count = 10;
+ u8 func = adapter->ahw.pci_func;
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ offset = PCI_OFFSET_SECOND_RANGE(adapter,
+ NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
+
+ writel(window, offset);
+ do {
+ if (window == readl(offset))
+ break;
+
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d\n",
+ (window == NETXEN_WINDOW_ONE));
+ udelay(1);
+
+ } while (--count > 0);
+
+ if (count > 0)
+ adapter->ahw.crb_win = window;
+}
+
+/*
+ * Returns < 0 if off is not valid,
+ * 1 if window access is needed. 'off' is set to offset from
+ * CRB space in 128M pci map
+ * 0 if no window access is needed. 'off' is set to 2M addr
+ * In: 'off' is offset from base in 128M pci map
+ */
+static int
+netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ crb_128M_2M_sub_block_map_t *m;
+
+
+ if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= NETXEN_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
+ (off & MASK(16));
+ return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static void
+netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= NETXEN_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+}
+
+static void __iomem *
+netxen_nic_map_indirect_address_128M(struct netxen_adapter *adapter,
+ ulong win_off, void __iomem **mem_ptr)
+{
+ ulong off = win_off;
+ void __iomem *addr;
+ resource_size_t mem_base;
+
+ if (ADDR_IN_WINDOW1(win_off))
+ off = NETXEN_CRB_NORMAL(win_off);
+
+ addr = pci_base_offset(adapter, off);
+ if (addr)
+ return addr;
+
+ if (adapter->ahw.pci_len0 == 0)
+ off -= NETXEN_PCI_CRBSPACE;
+
+ mem_base = pci_resource_start(adapter->pdev, 0);
+ *mem_ptr = ioremap(mem_base + (off & PAGE_MASK), PAGE_SIZE);
+ if (*mem_ptr)
+ addr = *mem_ptr + (off & (PAGE_SIZE - 1));
+
+ return addr;
+}
+
+static int
+netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ void __iomem *addr, *mem_ptr = NULL;
+
+ addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+ if (!addr)
+ return -EIO;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ netxen_nic_io_write_128M(adapter, addr, data);
+ } else { /* Window 0 */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+ writel(data, addr);
+ netxen_nic_pci_set_crbwindow_128M(adapter,
+ NETXEN_WINDOW_ONE);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ }
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+
+ return 0;
+}
+
+static u32
+netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ void __iomem *addr, *mem_ptr = NULL;
+ u32 data;
+
+ addr = netxen_nic_map_indirect_address_128M(adapter, off, &mem_ptr);
+ if (!addr)
+ return -EIO;
+
+ if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
+ data = netxen_nic_io_read_128M(adapter, addr);
+ } else { /* Window 0 */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+ data = readl(addr);
+ netxen_nic_pci_set_crbwindow_128M(adapter,
+ NETXEN_WINDOW_ONE);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ }
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+
+ return data;
+}
+
+static int
+netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+static u32
+netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ netxen_nic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+/* window 1 registers only */
+static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data)
+{
+ read_lock(&adapter->ahw.crb_lock);
+ writel(data, addr);
+ read_unlock(&adapter->ahw.crb_lock);
+}
+
+static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
+ void __iomem *addr)
+{
+ u32 val;
+
+ read_lock(&adapter->ahw.crb_lock);
+ val = readl(addr);
+ read_unlock(&adapter->ahw.crb_lock);
+
+ return val;
+}
+
+static void netxen_nic_io_write_2M(struct netxen_adapter *adapter,
+ void __iomem *addr, u32 data)
+{
+ writel(data, addr);
+}
+
+static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
+ void __iomem *addr)
+{
+ return readl(addr);
+}
+
+void __iomem *
+netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if ((offset < NETXEN_CRB_PCIX_HOST2) &&
+ (offset > NETXEN_CRB_PCIX_HOST))
+ addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
+ else
+ addr = NETXEN_CRB_NORMALIZE(adapter, offset);
+ } else {
+ WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
+ offset, &addr));
+ }
+
+ return addr;
+}
+
+static int
+netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0);
+ return 0;
+ } else if (ADDR_IN_RANGE(addr,
+ NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
+ return 0;
+ }
+
+ return -EIO;
+}
+
+static int
+netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+
+ window = OCM_WIN(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
+static int
+netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ ret = adapter->pci_set_window(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ addr = adapter->ahw.pci_base0 + start;
+ } else {
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) +
+ (start & PAGE_MASK);
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE-1));
+ }
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+void
+netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ *data = readq(addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+void
+netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw.pci_base0 +
+ NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
+
+ spin_lock(&adapter->ahw.mem_lock);
+ writeq(data, addr);
+ spin_unlock(&adapter->ahw.mem_lock);
+}
+
+#define MAX_CTL_CHECK 1000
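+
+/*
+ * The MIU/SIU test-agent accesses below share one handshake: program
+ * the 64-bit address (and, for writes, the data), assert ENABLE and
+ * then START | ENABLE in the control register, then poll until the
+ * BUSY bit clears (at most MAX_CTL_CHECK reads) before using the
+ * result.
+ */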
+
+static int
+netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
+ u64 off, u64 data)
+{
+ int j, ret;
+ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P2 has different SIU and MIU test agent base addr */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P2)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+ addr_hi = SIU_TEST_AGT_ADDR_HI;
+ data_lo = SIU_TEST_AGT_WRDATA_LO;
+ data_hi = SIU_TEST_AGT_WRDATA_HI;
+ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ addr_hi = MIU_TEST_AGT_ADDR_HI;
+ data_lo = MIU_TEST_AGT_WRDATA_LO;
+ data_hi = MIU_TEST_AGT_WRDATA_HI;
+ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+ off_hi = 0;
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ if (adapter->ahw.pci_len0 != 0) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, &data, 1);
+ }
+ }
+
+ return -EIO;
+
+correct:
+ spin_lock(&adapter->ahw.mem_lock);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(off_hi, (mem_crb + addr_hi));
+ writel(data & 0xffffffff, (mem_crb + data_lo));
+ writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl((mem_crb + TEST_AGT_CTRL));
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+ spin_unlock(&adapter->ahw.mem_lock);
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P2 has different SIU and MIU test agent base addr */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P2)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
+ addr_hi = SIU_TEST_AGT_ADDR_HI;
+ data_lo = SIU_TEST_AGT_RDDATA_LO;
+ data_hi = SIU_TEST_AGT_RDDATA_HI;
+ off_lo = off & SIU_TEST_AGT_ADDR_MASK;
+ off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = pci_base_offset(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ addr_hi = MIU_TEST_AGT_ADDR_HI;
+ data_lo = MIU_TEST_AGT_RDDATA_LO;
+ data_hi = MIU_TEST_AGT_RDDATA_HI;
+ off_lo = off & MIU_TEST_AGT_ADDR_MASK;
+ off_hi = 0;
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
+ ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
+ if (adapter->ahw.pci_len0 != 0) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+ }
+
+ return -EIO;
+
+correct:
+ spin_lock(&adapter->ahw.mem_lock);
+ netxen_nic_pci_set_crbwindow_128M(adapter, 0);
+
+ writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(off_hi, (mem_crb + addr_hi));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+
+ temp = readl(mem_crb + data_hi);
+ val = ((u64)temp << 32);
+ val |= readl(mem_crb + data_lo);
+ *data = val;
+ ret = 0;
+ }
+
+ netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
+ u64 off, u64 data)
+{
+ int j, ret;
+ u32 temp, off8;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
+ return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ off8 = off & 0xfffffff8;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA_LO);
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA_HI);
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+static int
+netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* P3 onward, test agent base for MIU and SIU is same */
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
+ NETXEN_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
+ mem_crb = netxen_get_ioaddr(adapter,
+ NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
+ return netxen_nic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ off8 = off & 0xfffffff8;
+
+ spin_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
+ val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
+ *data = val;
+ ret = 0;
+ }
+
+ spin_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+void
+netxen_setup_hwops(struct netxen_adapter *adapter)
+{
+ adapter->init_port = netxen_niu_xg_init_port;
+ adapter->stop_port = netxen_niu_disable_xg_port;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ adapter->crb_read = netxen_nic_hw_read_wx_128M;
+ adapter->crb_write = netxen_nic_hw_write_wx_128M;
+ adapter->pci_set_window = netxen_nic_pci_set_window_128M;
+ adapter->pci_mem_read = netxen_nic_pci_mem_read_128M;
+ adapter->pci_mem_write = netxen_nic_pci_mem_write_128M;
+ adapter->io_read = netxen_nic_io_read_128M;
+ adapter->io_write = netxen_nic_io_write_128M;
+
+ adapter->macaddr_set = netxen_p2_nic_set_mac_addr;
+ adapter->set_multi = netxen_p2_nic_set_multi;
+ adapter->set_mtu = netxen_nic_set_mtu_xgb;
+ adapter->set_promisc = netxen_p2_nic_set_promisc;
+
+ } else {
+ adapter->crb_read = netxen_nic_hw_read_wx_2M;
+ adapter->crb_write = netxen_nic_hw_write_wx_2M;
+ adapter->pci_set_window = netxen_nic_pci_set_window_2M;
+ adapter->pci_mem_read = netxen_nic_pci_mem_read_2M;
+ adapter->pci_mem_write = netxen_nic_pci_mem_write_2M;
+ adapter->io_read = netxen_nic_io_read_2M;
+ adapter->io_write = netxen_nic_io_write_2M;
+
+ adapter->set_mtu = nx_fw_cmd_set_mtu;
+ adapter->set_promisc = netxen_p3_nic_set_promisc;
+ adapter->macaddr_set = netxen_p3_nic_set_mac_addr;
+ adapter->set_multi = netxen_p3_nic_set_multi;
+
+ adapter->phy_read = nx_fw_cmd_query_phy;
+ adapter->phy_write = nx_fw_cmd_set_phy;
+ }
+}
+
+int netxen_nic_get_board_info(struct netxen_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = NX_FW_MAGIC_OFFSET;
+ if (netxen_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != NETXEN_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = NX_BRDTYPE_OFFSET;
+ if (netxen_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
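+ /*
+ * For NETXEN_BRDTYPE_P3_4_GB_MM boards, GPIO pad bit 15 apparently
+ * distinguishes the build: when the bit is clear the board is
+ * treated as the 10G twisted-pair variant instead.
+ */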
+ if (board_type == NETXEN_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = NXRD32(adapter, NETXEN_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = NETXEN_BRDTYPE_P3_10G_TP;
+ }
+
+ adapter->ahw.board_type = board_type;
+
+ switch (board_type) {
+ case NETXEN_BRDTYPE_P2_SB35_4G:
+ adapter->ahw.port_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ case NETXEN_BRDTYPE_P3_HMEZ:
+ case NETXEN_BRDTYPE_P3_XG_LOM:
+ case NETXEN_BRDTYPE_P3_10G_CX4:
+ case NETXEN_BRDTYPE_P3_10G_CX4_LP:
+ case NETXEN_BRDTYPE_P3_IMEZ:
+ case NETXEN_BRDTYPE_P3_10G_SFP_PLUS:
+ case NETXEN_BRDTYPE_P3_10G_SFP_CT:
+ case NETXEN_BRDTYPE_P3_10G_SFP_QT:
+ case NETXEN_BRDTYPE_P3_10G_XFP:
+ case NETXEN_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = NETXEN_NIC_XGBE;
+ break;
+ case NETXEN_BRDTYPE_P1_BD:
+ case NETXEN_BRDTYPE_P1_SB:
+ case NETXEN_BRDTYPE_P1_SMAX:
+ case NETXEN_BRDTYPE_P1_SOCK:
+ case NETXEN_BRDTYPE_P3_REF_QG:
+ case NETXEN_BRDTYPE_P3_4_GB:
+ case NETXEN_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = NETXEN_NIC_GBE;
+ break;
+ case NETXEN_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ NETXEN_NIC_XGBE : NETXEN_NIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = NETXEN_NIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+/* NIU access sections */
+static int netxen_nic_set_mtu_xgb(struct netxen_adapter *adapter, int new_mtu)
+{
+ new_mtu += MTU_FUDGE_FACTOR;
+ if (adapter->physical_port == 0)
+ NXWR32(adapter, NETXEN_NIU_XGE_MAX_FRAME_SIZE, new_mtu);
+ else
+ NXWR32(adapter, NETXEN_NIU_XG1_MAX_FRAME_SIZE, new_mtu);
+ return 0;
+}
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
+{
+ __u32 status;
+ __u32 autoneg;
+ __u32 port_mode;
+
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->link_speed = 0;
+ adapter->link_duplex = -1;
+ adapter->link_autoneg = AUTONEG_ENABLE;
+ return;
+ }
+
+ if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ port_mode = NXRD32(adapter, NETXEN_PORT_MODE_ADDR);
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ adapter->link_speed = SPEED_1000;
+ adapter->link_duplex = DUPLEX_FULL;
+ adapter->link_autoneg = AUTONEG_DISABLE;
+ return;
+ }
+
+ if (adapter->phy_read &&
+ adapter->phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) == 0) {
+ if (netxen_get_phy_link(status)) {
+ switch (netxen_get_phy_speed(status)) {
+ case 0:
+ adapter->link_speed = SPEED_10;
+ break;
+ case 1:
+ adapter->link_speed = SPEED_100;
+ break;
+ case 2:
+ adapter->link_speed = SPEED_1000;
+ break;
+ default:
+ adapter->link_speed = 0;
+ break;
+ }
+ switch (netxen_get_phy_duplex(status)) {
+ case 0:
+ adapter->link_duplex = DUPLEX_HALF;
+ break;
+ case 1:
+ adapter->link_duplex = DUPLEX_FULL;
+ break;
+ default:
+ adapter->link_duplex = -1;
+ break;
+ }
+ if (adapter->phy_read &&
+ adapter->phy_read(adapter,
+ NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ &autoneg) == 0)
+ adapter->link_autoneg = autoneg;
+ } else
+ goto link_down;
+ } else {
+ link_down:
+ adapter->link_speed = 0;
+ adapter->link_duplex = -1;
+ }
+ }
+}
+
+int
+netxen_nic_wol_supported(struct netxen_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = NXRD32(adapter, NETXEN_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+static u32 netxen_md_cntrl(struct netxen_adapter *adapter,
+ struct netxen_minidump_template_hdr *template_hdr,
+ struct netxen_minidump_entry_crb *crtEntry)
+{
+ int loop_cnt, i, rv = 0, timeout_flag;
+ u32 op_count, stride;
+ u32 opcode, read_value, addr;
+ unsigned long timeout, timeout_jiffies;
+ addr = crtEntry->addr;
+ op_count = crtEntry->op_count;
+ stride = crtEntry->addr_stride;
+
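+ /*
+ * Each set bit in crtEntry->opcode selects one control operation;
+ * every selected operation is applied, in bit order, at each of
+ * the op_count strided addresses.
+ */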
+ for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+ for (i = 0; i < sizeof(crtEntry->opcode) * 8; i++) {
+ opcode = (crtEntry->opcode & (0x1 << i));
+ if (opcode) {
+ switch (opcode) {
+ case NX_DUMP_WCRB:
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ crtEntry->value_1);
+ break;
+ case NX_DUMP_RWCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_ANDCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ read_value &= crtEntry->value_2;
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_ORCRB:
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ read_value |= crtEntry->value_3;
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_POLLCRB:
+ timeout = crtEntry->poll_timeout;
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ timeout_jiffies =
+ msecs_to_jiffies(timeout) + jiffies;
+ for (timeout_flag = 0;
+ !timeout_flag
+ && ((read_value & crtEntry->value_2)
+ != crtEntry->value_1);) {
+ if (time_after(jiffies,
+ timeout_jiffies))
+ timeout_flag = 1;
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ }
+
+ if (timeout_flag) {
+ dev_err(&adapter->pdev->dev, "%s : "
+ "Timeout in poll_crb control operation.\n"
+ , __func__);
+ return -1;
+ }
+ break;
+ case NX_DUMP_RD_SAVE:
+ /* Decide which address to use */
+ if (crtEntry->state_index_a)
+ addr =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_a];
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v]
+ = read_value;
+ break;
+ case NX_DUMP_WRT_SAVED:
+ /* Decide which value to use */
+ if (crtEntry->state_index_v)
+ read_value =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v];
+ else
+ read_value = crtEntry->value_1;
+
+ /* Decide which address to use */
+ if (crtEntry->state_index_a)
+ addr =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_a];
+
+ NX_WR_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ read_value);
+ break;
+ case NX_DUMP_MOD_SAVE_ST:
+ read_value =
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v];
+ read_value <<= crtEntry->shl;
+ read_value >>= crtEntry->shr;
+ if (crtEntry->value_2)
+ read_value &=
+ crtEntry->value_2;
+ read_value |= crtEntry->value_3;
+ read_value += crtEntry->value_1;
+ /* Write value back to state area.*/
+ template_hdr->saved_state_array
+ [crtEntry->state_index_v]
+ = read_value;
+ break;
+ default:
+ rv = 1;
+ break;
+ }
+ }
+ }
+ addr = addr + stride;
+ }
+ return rv;
+}
+
+/* Read memory or MN */
+static u32
+netxen_md_rdmem(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdmem
+ *memEntry, u64 *data_buff)
+{
+ u64 addr, value = 0;
+ int i = 0, loop_cnt;
+
+ addr = (u64)memEntry->read_addr;
+ loop_cnt = memEntry->read_data_size; /* This is size in bytes */
+ loop_cnt /= sizeof(value);
+
+ for (i = 0; i < loop_cnt; i++) {
+ if (netxen_nic_pci_mem_read_2M(adapter, addr, &value))
+ goto out;
+ *data_buff++ = value;
+ addr += sizeof(value);
+ }
+out:
+ return i * sizeof(value);
+}
+
+/* Read CRB operation */
+static u32 netxen_md_rd_crb(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_crb
+ *crbEntry, u32 *data_buff)
+{
+ int loop_cnt;
+ u32 op_count, addr, stride, value;
+
+ addr = crbEntry->addr;
+ op_count = crbEntry->op_count;
+ stride = crbEntry->addr_stride;
+
+ for (loop_cnt = 0; loop_cnt < op_count; loop_cnt++) {
+ NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0, &value);
+ *data_buff++ = addr;
+ *data_buff++ = value;
+ addr = addr + stride;
+ }
+ return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Read ROM */
+static u32
+netxen_md_rdrom(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdrom
+ *romEntry, __le32 *data_buff)
+{
+ int i, count = 0;
+ u32 size, lck_val;
+ u32 val;
+ u32 fl_addr, waddr, raddr;
+ fl_addr = romEntry->read_addr;
+ size = romEntry->read_data_size/4;
+lock_try:
+ lck_val = readl((void __iomem *)(adapter->ahw.pci_base0 +
+ NX_FLASH_SEM2_LK));
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ msleep(20);
+ count++;
+ goto lock_try;
+ }
+ writel(adapter->ahw.pci_func, (void __iomem *)(adapter->ahw.pci_base0 +
+ NX_FLASH_LOCK_ID));
+ for (i = 0; i < size; i++) {
+ waddr = fl_addr & 0xFFFF0000;
+ NX_WR_DUMP_REG(FLASH_ROM_WINDOW, adapter->ahw.pci_base0, waddr);
+ raddr = FLASH_ROM_DATA + (fl_addr & 0x0000FFFF);
+ NX_RD_DUMP_REG(raddr, adapter->ahw.pci_base0, &val);
+ *data_buff++ = cpu_to_le32(val);
+ fl_addr += sizeof(val);
+ }
+ readl((void __iomem *)(adapter->ahw.pci_base0 + NX_FLASH_SEM2_ULK));
+ return romEntry->read_data_size;
+}
+
+/* Handle L2 Cache */
+static u32
+netxen_md_L2Cache(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_cache
+ *cacheEntry, u32 *data_buff)
+{
+ int loop_cnt, i, k, timeout_flag = 0;
+ u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+ u32 tag_value, read_cnt;
+ u8 cntl_value_w, cntl_value_r;
+ unsigned long timeout, timeout_jiffies;
+
+ loop_cnt = cacheEntry->op_count;
+ read_addr = cacheEntry->read_addr;
+ cntrl_addr = cacheEntry->control_addr;
+ cntl_value_w = (u32) cacheEntry->write_value;
+ tag_reg_addr = cacheEntry->tag_reg_addr;
+ tag_value = cacheEntry->init_tag_value;
+ read_cnt = cacheEntry->read_addr_cnt;
+
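+ /*
+ * For each tag: latch the tag, optionally write the control value
+ * and poll until the poll_mask bits clear, then read read_cnt words
+ * starting at read_addr using the configured stride.
+ */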
+ for (i = 0; i < loop_cnt; i++) {
+ NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+ if (cntl_value_w)
+ NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ (u32)cntl_value_w);
+ if (cacheEntry->poll_mask) {
+ timeout = cacheEntry->poll_wait;
+ NX_RD_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ &cntl_value_r);
+ timeout_jiffies = msecs_to_jiffies(timeout) + jiffies;
+ for (timeout_flag = 0; !timeout_flag &&
+ ((cntl_value_r & cacheEntry->poll_mask) != 0);) {
+ if (time_after(jiffies, timeout_jiffies))
+ timeout_flag = 1;
+ NX_RD_DUMP_REG(cntrl_addr,
+ adapter->ahw.pci_base0,
+ &cntl_value_r);
+ }
+ if (timeout_flag) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout in processing L2 Tag poll.\n");
+ return -1;
+ }
+ }
+ addr = read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(addr, adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ addr += cacheEntry->read_addr_stride;
+ }
+ tag_value += cacheEntry->tag_value_stride;
+ }
+ return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+
+/* Handle L1 Cache */
+static u32 netxen_md_L1Cache(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_cache
+ *cacheEntry, u32 *data_buff)
+{
+ int i, k, loop_cnt;
+ u32 addr, read_addr, read_value, cntrl_addr, tag_reg_addr;
+ u32 tag_value, read_cnt;
+ u8 cntl_value_w;
+
+ loop_cnt = cacheEntry->op_count;
+ read_addr = cacheEntry->read_addr;
+ cntrl_addr = cacheEntry->control_addr;
+ cntl_value_w = (u32) cacheEntry->write_value;
+ tag_reg_addr = cacheEntry->tag_reg_addr;
+ tag_value = cacheEntry->init_tag_value;
+ read_cnt = cacheEntry->read_addr_cnt;
+
+ for (i = 0; i < loop_cnt; i++) {
+ NX_WR_DUMP_REG(tag_reg_addr, adapter->ahw.pci_base0, tag_value);
+ NX_WR_DUMP_REG(cntrl_addr, adapter->ahw.pci_base0,
+ (u32) cntl_value_w);
+ addr = read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(addr,
+ adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ addr += cacheEntry->read_addr_stride;
+ }
+ tag_value += cacheEntry->tag_value_stride;
+ }
+ return read_cnt * loop_cnt * sizeof(read_value);
+}
+
+/* Reading OCM memory */
+static u32
+netxen_md_rdocm(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_rdocm
+ *ocmEntry, u32 *data_buff)
+{
+ int i, loop_cnt;
+ u32 value;
+ void __iomem *addr;
+ addr = (ocmEntry->read_addr + adapter->ahw.pci_base0);
+ loop_cnt = ocmEntry->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ value = readl(addr);
+ *data_buff++ = value;
+ addr += ocmEntry->read_addr_stride;
+ }
+ return i * sizeof(u32);
+}
+
+/* Read MUX data */
+static u32
+netxen_md_rdmux(struct netxen_adapter *adapter, struct netxen_minidump_entry_mux
+ *muxEntry, u32 *data_buff)
+{
+ int loop_cnt = 0;
+ u32 read_addr, read_value, select_addr, sel_value;
+
+ read_addr = muxEntry->read_addr;
+ sel_value = muxEntry->select_value;
+ select_addr = muxEntry->select_addr;
+
+ for (loop_cnt = 0; loop_cnt < muxEntry->op_count; loop_cnt++) {
+ NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, sel_value);
+ NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0, &read_value);
+ *data_buff++ = sel_value;
+ *data_buff++ = read_value;
+ sel_value += muxEntry->select_value_stride;
+ }
+ return loop_cnt * (2 * sizeof(u32));
+}
+
+/* Handling Queue State Reads */
+static u32
+netxen_md_rdqueue(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry_queue
+ *queueEntry, u32 *data_buff)
+{
+ int loop_cnt, k;
+ u32 queue_id, read_addr, read_value, read_stride, select_addr, read_cnt;
+
+ read_cnt = queueEntry->read_addr_cnt;
+ read_stride = queueEntry->read_addr_stride;
+ select_addr = queueEntry->select_addr;
+
+ for (loop_cnt = 0, queue_id = 0; loop_cnt < queueEntry->op_count;
+ loop_cnt++) {
+ NX_WR_DUMP_REG(select_addr, adapter->ahw.pci_base0, queue_id);
+ read_addr = queueEntry->read_addr;
+ for (k = 0; k < read_cnt; k++) {
+ NX_RD_DUMP_REG(read_addr, adapter->ahw.pci_base0,
+ &read_value);
+ *data_buff++ = read_value;
+ read_addr += read_stride;
+ }
+ queue_id += queueEntry->queue_id_stride;
+ }
+ return loop_cnt * (read_cnt * sizeof(read_value));
+}
+
+
+/*
+ * Catch the case where the driver reads a different amount of data
+ * from an entry than the template expects.
+ */
+
+static int netxen_md_entry_err_chk(struct netxen_adapter *adapter,
+ struct netxen_minidump_entry *entry, int esize)
+{
+ if (esize < 0) {
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ return esize;
+ }
+ if (esize != entry->hdr.entry_capture_size) {
+ entry->hdr.entry_capture_size = esize;
+ entry->hdr.driver_flags |= NX_DUMP_SIZE_ERR;
+ dev_info(&adapter->pdev->dev,
+ "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.entry_type, entry->hdr.entry_capture_mask,
+ esize, entry->hdr.entry_capture_size);
+ dev_info(&adapter->pdev->dev, "Aborting further dump capture\n");
+ }
+ return 0;
+}
+
+static int netxen_parse_md_template(struct netxen_adapter *adapter)
+{
+ int num_of_entries, buff_level, e_cnt, esize;
+ int rv = 0, sane_start = 0, sane_end = 0;
+ char *dbuff;
+ void *template_buff = adapter->mdump.md_template;
+ char *dump_buff = adapter->mdump.md_capture_buff;
+ int capture_mask = adapter->mdump.md_capture_mask;
+ struct netxen_minidump_template_hdr *template_hdr;
+ struct netxen_minidump_entry *entry;
+
+ if ((capture_mask & 0x3) != 0x3) {
+ dev_err(&adapter->pdev->dev, "Capture mask %02x below minimum needed "
+ "for valid firmware dump\n", capture_mask);
+ return -EINVAL;
+ }
+ template_hdr = (struct netxen_minidump_template_hdr *) template_buff;
+ num_of_entries = template_hdr->num_of_entries;
+ entry = (struct netxen_minidump_entry *) ((char *) template_buff +
+ template_hdr->first_entry_offset);
+ memcpy(dump_buff, template_buff, adapter->mdump.md_template_size);
+ dump_buff = dump_buff + adapter->mdump.md_template_size;
+
+ if (template_hdr->entry_type == TLHDR)
+ sane_start = 1;
+
+ for (e_cnt = 0, buff_level = 0; e_cnt < num_of_entries; e_cnt++) {
+ if (!(entry->hdr.entry_capture_mask & capture_mask)) {
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ entry = (struct netxen_minidump_entry *)
+ ((char *) entry + entry->hdr.entry_size);
+ continue;
+ }
+ switch (entry->hdr.entry_type) {
+ case RDNOP:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ case RDEND:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ sane_end += 1;
+ break;
+ case CNTRL:
+ rv = netxen_md_cntrl(adapter,
+ template_hdr, (void *)entry);
+ if (rv)
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ case RDCRB:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rd_crb(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDMN:
+ case RDMEM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdmem(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case BOARD:
+ case RDROM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdrom(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case L2ITG:
+ case L2DTG:
+ case L2DAT:
+ case L2INS:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_L2Cache(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case L1DAT:
+ case L1INS:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_L1Cache(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDOCM:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdocm(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case RDMUX:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdmux(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ case QUEUE:
+ dbuff = dump_buff + buff_level;
+ esize = netxen_md_rdqueue(adapter,
+ (void *) entry, (void *) dbuff);
+ rv = netxen_md_entry_err_chk
+ (adapter, entry, esize);
+ if (rv < 0)
+ break;
+ buff_level += esize;
+ break;
+ default:
+ entry->hdr.driver_flags |= NX_DUMP_SKIP;
+ break;
+ }
+ /* Next entry in the template */
+ entry = (struct netxen_minidump_entry *)
+ ((char *) entry + entry->hdr.entry_size);
+ }
+ if (!sane_start || sane_end > 1) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware minidump template configuration error.\n");
+ }
+ return 0;
+}
+
+static int
+netxen_collect_minidump(struct netxen_adapter *adapter)
+{
+ int ret = 0;
+ struct netxen_minidump_template_hdr *hdr;
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ hdr->driver_capture_mask = adapter->mdump.md_capture_mask;
+ hdr->driver_timestamp = ktime_get_seconds();
+ hdr->driver_info_word2 = adapter->fw_version;
+ hdr->driver_info_word3 = NXRD32(adapter, CRB_DRIVER_VERSION);
+ ret = netxen_parse_md_template(adapter);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+
+void
+netxen_dump_fw(struct netxen_adapter *adapter)
+{
+ struct netxen_minidump_template_hdr *hdr;
+ int i, k, data_size = 0;
+ u32 capture_mask;
+ hdr = (struct netxen_minidump_template_hdr *)
+ adapter->mdump.md_template;
+ capture_mask = adapter->mdump.md_capture_mask;
+
+ for (i = 0x2, k = 1; (i & NX_DUMP_MASK_MAX); i <<= 1, k++) {
+ if (i & capture_mask)
+ data_size += hdr->capture_size_array[k];
+ }
+ if (!data_size) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid cap sizes for capture_mask=0x%x\n",
+ adapter->mdump.md_capture_mask);
+ return;
+ }
+ adapter->mdump.md_capture_size = data_size;
+ adapter->mdump.md_dump_size = adapter->mdump.md_template_size +
+ adapter->mdump.md_capture_size;
+ if (!adapter->mdump.md_capture_buff) {
+ adapter->mdump.md_capture_buff =
+ vzalloc(adapter->mdump.md_dump_size);
+ if (!adapter->mdump.md_capture_buff)
+ return;
+
+ if (netxen_collect_minidump(adapter)) {
+ adapter->mdump.has_valid_dump = 0;
+ adapter->mdump.md_dump_size = 0;
+ vfree(adapter->mdump.md_capture_buff);
+ adapter->mdump.md_capture_buff = NULL;
+ dev_err(&adapter->pdev->dev,
+ "Error in collecting firmware minidump.\n");
+ } else {
+ adapter->mdump.md_timestamp = jiffies;
+ adapter->mdump.has_valid_dump = 1;
+ adapter->fw_mdump_rdy = 1;
+ dev_info(&adapter->pdev->dev, "%s Successfully "
+ "collected fw dump.\n", adapter->netdev->name);
+ }
+
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "Cannot overwrite previously collected "
+ "firmware minidump.\n");
+ adapter->fw_mdump_rdy = 1;
+ return;
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
new file mode 100644
index 000000000..de73766e1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#ifndef __NETXEN_NIC_HW_H_
+#define __NETXEN_NIC_HW_H_
+
+/* Hardware memory size of 128 meg */
+#define NETXEN_MEMADDR_MAX (128 * 1024 * 1024)
+
+struct netxen_adapter;
+
+#define NETXEN_PCI_MAPSIZE_BYTES (NETXEN_PCI_MAPSIZE << 20)
+
+void netxen_nic_set_link_parameters(struct netxen_adapter *adapter);
+
+/* Nibble or Byte mode for phy interface (GbE mode only) */
+
+#define _netxen_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+
+#define netxen_gb_tx_flowctl(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define netxen_gb_tx_reset_pb(config_word) \
+ ((config_word) |= 1 << 16)
+#define netxen_gb_rx_reset_pb(config_word) \
+ ((config_word) |= 1 << 17)
+#define netxen_gb_tx_reset_mac(config_word) \
+ ((config_word) |= 1 << 18)
+#define netxen_gb_rx_reset_mac(config_word) \
+ ((config_word) |= 1 << 19)
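+
+/*
+ * These helpers only OR bits into a caller-supplied config word; an
+ * illustrative sketch of enabling pause in both directions:
+ *
+ *	u32 cfg = 0;
+ *	netxen_gb_tx_flowctl(cfg);	now cfg == 0x10
+ *	netxen_gb_rx_flowctl(cfg);	now cfg == 0x30
+ *
+ * The word would then be written to the per-port GB MAC config
+ * register with NXWR32().
+ */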
+
+#define netxen_gb_unset_tx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
+
+#define netxen_gb_get_tx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 1)
+#define netxen_gb_get_rx_synced(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+#define netxen_gb_get_tx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_rx_flowctl(config_word) \
+ _netxen_crb_get_bit((config_word), 5)
+#define netxen_gb_get_soft_reset(config_word) \
+ _netxen_crb_get_bit((config_word), 31)
+
+#define netxen_gb_get_stationaddress_low(config_word) ((config_word) >> 16)
+
+#define netxen_gb_set_mii_mgmt_clockselect(config_word, val) \
+ ((config_word) |= ((val) & 0x07))
+#define netxen_gb_mii_mgmt_reset(config_word) \
+ ((config_word) |= 1 << 31)
+#define netxen_gb_mii_mgmt_unset(config_word) \
+ ((config_word) &= ~(1 << 31))
+
+/*
+ * NIU GB MII Mgmt Command Register (applies to GB0, GB1, GB2, GB3)
+ * Bit 0 : read_cycle => 1:perform single read cycle, 0:no-op
+ * Bit 1 : scan_cycle => 1:perform continuous read cycles, 0:no-op
+ */
+
+#define netxen_gb_mii_mgmt_set_read_cycle(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_mii_mgmt_reg_addr(config_word, val) \
+ ((config_word) |= ((val) & 0x1F))
+#define netxen_gb_mii_mgmt_phy_addr(config_word, val) \
+ ((config_word) |= (((val) & 0x1F) << 8))
+
+/*
+ * NIU GB MII Mgmt Indicators Register (applies to GB0, GB1, GB2, GB3)
+ * Read-only register.
+ * Bit 0 : busy => 1:performing an MII mgmt cycle, 0:idle
+ * Bit 1 : scanning => 1:scan operation in progress, 0:idle
+ * Bit 2 : notvalid => 1:mgmt result data not yet valid, 0:idle
+ */
+#define netxen_get_gb_mii_mgmt_busy(config_word) \
+ _netxen_crb_get_bit(config_word, 0)
+#define netxen_get_gb_mii_mgmt_scanning(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+#define netxen_get_gb_mii_mgmt_notvalid(config_word) \
+ _netxen_crb_get_bit(config_word, 2)
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define netxen_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define netxen_xg_get_xg0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_xg_get_xg1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 3)
+
+#define netxen_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
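+
+/*
+ * Example only: temporarily masking tx pause frames on XG port 0 with the
+ * helpers above; the register read/write around the bit manipulation is
+ * the caller's job.
+ *
+ *   u32 pause = NXRD32(adapter, <XG pause ctl register>);
+ *   netxen_xg_set_xg0_mask(pause);    // bit 0: stop sending pause frames
+ *   NXWR32(adapter, <XG pause ctl register>, pause);
+ *   ...
+ *   netxen_xg_unset_xg0_mask(pause);  // re-enable pause frames
+ */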
+
+/*
+ * NIU GB Pause Ctl Register (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : gb0_mask => 1:disable tx pause frames
+ * Bit 2 : gb1_mask => 1:disable tx pause frames
+ * Bit 4 : gb2_mask => 1:disable tx pause frames
+ * Bit 6 : gb3_mask => 1:disable tx pause frames
+ */
+#define netxen_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define netxen_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define netxen_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define netxen_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define netxen_gb_get_gb0_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 0)
+#define netxen_gb_get_gb1_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 2)
+#define netxen_gb_get_gb2_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 4)
+#define netxen_gb_get_gb3_mask(config_word) \
+ _netxen_crb_get_bit((config_word), 6)
+
+#define netxen_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define netxen_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define netxen_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define netxen_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_CONTROL 0
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_STATUS 1
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_0 2
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_ID_1 3
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART 5
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG_MORE 6
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_NEXTPAGE_XMIT 7
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LNKPART_NEXTPAGE 8
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_CONTROL 9
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_1000BT_STATUS 10
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_EXTENDED_STATUS 15
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL 16
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_ENABLE 18
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_INT_STATUS 19
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE 20
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_RECV_ERROR_COUNT 21
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_CONTROL 24
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_LED_OVERRIDE 25
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_CONTROL_MORE_YET 26
+#define NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS_MORE 27
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define netxen_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define netxen_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define netxen_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define netxen_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define netxen_get_phy_link(config_word) \
+ _netxen_crb_get_bit(config_word, 10)
+#define netxen_get_phy_duplex(config_word) \
+ _netxen_crb_get_bit(config_word, 13)
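+
+/*
+ * Example decode of a PHY-Specific Status word (illustrative only),
+ * using the accessors above:
+ *
+ *   if (netxen_get_phy_link(status)) {
+ *           speed  = netxen_get_phy_speed(status);   // 0:10, 1:100, 2:1000 Mb/s
+ *           duplex = netxen_get_phy_duplex(status);  // 1:full, 0:half
+ *   }
+ */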
+
+/*
+ * NIU Mode Register.
+ * Bit 0 : enable FibreChannel
+ * Bit 1 : enable 10/100/1000 Ethernet
+ * Bit 2 : enable 10Gb Ethernet
+ */
+
+#define netxen_get_niu_enable_ge(config_word) \
+ _netxen_crb_get_bit(config_word, 1)
+
+#define NETXEN_NIU_NON_PROMISC_MODE 0
+#define NETXEN_NIU_PROMISC_MODE 1
+#define NETXEN_NIU_ALLMULTI_MODE 2
+
+/*
+ * NIU XG MAC Config Register
+ *
+ * Bit 0 : tx_enable => 1:enable frame xmit, 0:disable
+ * Bit 2 : rx_enable => 1:enable frame recv, 0:disable
+ * Bit 4 : soft_reset => 1:reset the MAC , 0:no-op
+ * Bit 27: xaui_framer_reset
+ * Bit 28: xaui_rx_reset
+ * Bit 29: xaui_tx_reset
+ * Bit 30: xg_ingress_afifo_reset
+ * Bit 31: xg_egress_afifo_reset
+ */
+
+#define netxen_xg_soft_reset(config_word) \
+ ((config_word) |= 1 << 4)
+
+typedef struct {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+} crb_128M_2M_sub_block_map_t;
+
+typedef struct {
+ crb_128M_2M_sub_block_map_t sub_block[16];
+} crb_128M_2M_block_map_t;
+
+#endif /* __NETXEN_NIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
new file mode 100644
index 000000000..35ec9aab3
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -0,0 +1,1913 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+#include <net/checksum.h>
+#include "netxen_nic.h"
+#include "netxen_nic_hw.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define NETXEN_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
+#define NETXEN_ADDR_ERROR (0xffffffff)
+
+#define crb_addr_transform(name) \
+ crb_addr_xform[NETXEN_HW_PX_MAP_CRB_##name] = \
+ NETXEN_HW_CRB_HUB_AGT_ADR_##name << 20
+
+#define NETXEN_NIC_XDMA_RESET 0x8000ff
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring);
+static int netxen_p3_has_mn(struct netxen_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void netxen_release_rx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == NETXEN_BUFFER_FREE)
+ continue;
+ dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+ rds_ring->dma_size, DMA_FROM_DEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void netxen_release_tx_buffers(struct netxen_adapter *adapter)
+{
+ struct netxen_cmd_buffer *cmd_buf;
+ struct netxen_skb_frag *buffrag;
+ int i, j;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ spin_lock_bh(&adapter->tx_clean_lock);
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
+ buffrag->length, DMA_TO_DEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ dma_unmap_page(&adapter->pdev->dev,
+ buffrag->dma, buffrag->length,
+ DMA_TO_DEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+ spin_unlock_bh(&adapter->tx_clean_lock);
+}
+
+void netxen_free_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
+}
+
+int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
+{
+ struct netxen_recv_context *recv_ctx;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ struct netxen_rx_buffer *rx_buf;
+ int ring, i;
+
+ struct netxen_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ tx_ring = kzalloc(sizeof(struct nx_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
+
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL)
+ goto err_out;
+
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct nx_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
+ goto err_out;
+
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ NX_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ rds_ring->dma_size =
+ NX_P3_RX_JUMBO_BUF_MAX_LEN;
+ else
+ rds_ring->dma_size =
+ NX_P2_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & NX_CAP0_HW_LRO)
+ rds_ring->dma_size += NX_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = NX_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL)
+ /* free whatever was already allocated */
+ goto err_out;
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = NETXEN_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ netxen_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * netxen_decode_crb_addr() - utility to translate from internal Phantom CRB
+ * address to external PCI CRB address.
+ */
+static u32 netxen_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = NETXEN_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < NETXEN_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == NETXEN_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
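+
+/*
+ * Worked example (for illustration): crb_addr_transform() stores
+ * (NETXEN_HW_CRB_HUB_AGT_ADR_<agent> << 20) at index
+ * NETXEN_HW_PX_MAP_CRB_<agent>.  When addr & 0xfff00000 matches one of
+ * those entries, the decoded address is (index << 20) | (addr & 0xfffff),
+ * i.e. the agent's window in PCI CRB space plus the original offset.
+ */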
+
+#define NETXEN_MAX_ROM_WAIT_USEC 100
+
+static int netxen_wait_rom_done(struct netxen_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = NXRD32(adapter, NETXEN_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= NETXEN_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct netxen_adapter *adapter,
+ int addr, int *valp)
+{
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ADDRESS, addr);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 3);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (netxen_wait_rom_done(adapter)) {
+ printk("Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ NXWR32(adapter, NETXEN_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = NXRD32(adapter, NETXEN_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = netxen_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (netxen_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ netxen_rom_unlock(adapter);
+ return ret;
+}
+
+#define NETXEN_BOARDTYPE 0x4008
+#define NETXEN_BOARDNUM 0x400c
+#define NETXEN_CHIPNUM 0x4010
+
+int netxen_pinit_from_rom(struct netxen_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay = 0;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+
+ /* resetall */
+ netxen_rom_lock(adapter);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+ netxen_rom_unlock(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ (n != 0xcafecafe) ||
+ netxen_rom_fast_read(adapter, 4, &n) != 0) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+ } else {
+ if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
+ !(n & 0x80000000)) {
+ printk(KERN_ERR "%s: ERROR Reading crb_init area: "
+ "n: %08x\n", netxen_nic_driver_name, n);
+ return -EIO;
+ }
+ offset = 1;
+ n &= ~0x80000000;
+ }
+
+ if (n >= 1024) {
+ printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
+ " initialized.\n", __func__, n);
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = netxen_decode_crb_addr(buf[i].addr);
+ if (off == NETXEN_ADDR_ERROR) {
+ printk(KERN_ERR"CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += NETXEN_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == NETXEN_CAM_RAM(0x1fc))
+ continue;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (off == (NETXEN_CRB_I2C0 + 0x1c))
+ continue;
+ /* do not reset PCI */
+ if (off == (ROMUSB_GLB + 0xbc))
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
+ continue;
+ if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
+ !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+ buf[i].data = 0x1020;
+ /* skip the function enable register */
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == NETXEN_CRB_SMB)
+ continue;
+ }
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == NETXEN_ROMUSB_GLB_SW_RESET) {
+ init_delay = 1000;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /* hold xdma in reset also */
+ buf[i].data = NETXEN_NIC_XDMA_RESET;
+ buf[i].data = 0x8000ff;
+ }
+ }
+
+ NXWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* disable_peg_cache_all */
+
+ /* unreset_net_cache */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, (val & 0xffffff0f));
+ }
+
+ /* p2dn replyCount */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_D + 0x4c, 8);
+ /* disable_peg_cache 1 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+
+ /* peg_clr 0 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0xc, 0);
+ /* peg_clr 1 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_1 + 0xc, 0);
+ /* peg_clr 2 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_2 + 0xc, 0);
+ /* peg_clr 3 */
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0x8, 0);
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
+static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
+{
+ uint32_t i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define QLCNIC_FILEHEADER_SIZE (14 * 4)
+
+static int
+netxen_nic_validate_header(struct netxen_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 fw_file_size = adapter->fw->size;
+ u32 tab_size;
+ __le32 entries;
+ __le32 entry_size;
+
+ if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = cpu_to_le32(directory->num_entries);
+ entry_size = cpu_to_le32(directory->entry_size);
+ tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_bootld(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_BOOTLD_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+netxen_nic_validate_fw(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ NX_UNI_FIRMWARE_IDX_OFF));
+ u32 offs;
+ u32 tab_size;
+ u32 data_size;
+
+ tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * (idx));
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static int
+netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
+ 1 : netxen_p3_has_mn(adapter);
+ __le32 entries;
+ __le32 entry_size;
+ u32 tab_size;
+ u32 i;
+
+ ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
+ if (ptab_descr == NULL)
+ return -EINVAL;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+ entry_size = cpu_to_le32(ptab_descr->entry_size);
+ tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ uint32_t flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ NX_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+
+ if (mn_present && NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ mn_present = 0;
+ goto nomn;
+ }
+
+ return -EINVAL;
+}
+
+static int
+netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
+{
+ if (netxen_nic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (netxen_nic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = nx_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+nx_get_bootld_offs(struct netxen_adapter *adapter)
+{
+ u32 offs = NETXEN_BOOTLD_START;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_BOOTLD,
+ NX_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+nx_get_fw_offs(struct netxen_adapter *adapter)
+{
+ u32 offs = NETXEN_IMAGE_START;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW,
+ NX_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+nx_get_fw_size(struct netxen_adapter *adapter)
+{
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW,
+ NX_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
+}
+
+static __le32
+nx_get_fw_version(struct netxen_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret = 0;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+
+ fw_data_desc = nx_get_data_desc(adapter,
+ NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ break;
+ }
+ }
+
+ if (ret != 3)
+ return 0;
+
+ return major + (minor << 8) + (sub << 16);
+
+ } else
+ return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
+}
+
+static __le32
+nx_get_bios_version(struct netxen_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + NX_UNI_BIOS_VERSION_OFF));
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
+ (bios_ver >> 24);
+ } else
+ return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
+
+}
+
+int
+netxen_need_fw_reset(struct netxen_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+ u8 fw_type;
+
+ /* NX2031 firmware doesn't support heartbeat */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ NXWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have got newer or different file firmware */
+ if (adapter->fw) {
+
+ val = nx_get_fw_version(adapter);
+
+ version = NETXEN_DECODE_VERSION(val);
+
+ major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+
+ if (version > NETXEN_VERSION_CODE(major, minor, build))
+ return 1;
+
+ if (version == NETXEN_VERSION_CODE(major, minor, build) &&
+ adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
+
+ val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
+ fw_type = (val & 0x4) ?
+ NX_P3_CT_ROMIMAGE : NX_P3_MN_ROMIMAGE;
+
+ if (adapter->fw_type != fw_type)
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#define NETXEN_MIN_P3_FW_SUPP NETXEN_VERSION_CODE(4, 0, 505)
+
+int
+netxen_check_flash_fw_compatibility(struct netxen_adapter *adapter)
+{
+ u32 flash_fw_ver, min_fw_ver;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&adapter->pdev->dev, "Unable to read flash fw"
+ "version\n");
+ return -EIO;
+ }
+
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+ min_fw_ver = NETXEN_MIN_P3_FW_SUPP;
+ if (flash_fw_ver >= min_fw_ver)
+ return 0;
+
+ dev_info(&adapter->pdev->dev, "Flash fw[%d.%d.%d] is < min fw supported"
+ "[4.0.505]. Please update firmware on flash\n",
+ _major(flash_fw_ver), _minor(flash_fw_ver),
+ _build(flash_fw_ver));
+ return -EINVAL;
+}
+
+static char *fw_name[] = {
+ NX_P2_MN_ROMIMAGE_NAME,
+ NX_P3_CT_ROMIMAGE_NAME,
+ NX_P3_MN_ROMIMAGE_NAME,
+ NX_UNIFIED_ROMIMAGE_NAME,
+ NX_FLASH_ROMIMAGE_NAME,
+};
+
+int
+netxen_load_firmware(struct netxen_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 1);
+
+ if (fw) {
+ __le64 data;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)nx_get_bootld_offs(adapter);
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)nx_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)nx_get_fw_offs(adapter);
+ flashaddr = NETXEN_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)nx_get_fw_size(adapter) % 8;
+ if (size) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
+ flashaddr = NETXEN_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (netxen_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (netxen_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ /* hi, lo are already in host endian byteorder */
+ data = (((u64)hi << 32) | lo);
+
+ if (adapter->pci_mem_write(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+ NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
+ } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
+ else {
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_CAS_RST, 0);
+ }
+
+ return 0;
+}
+
+static int
+netxen_validate_firmware(struct netxen_adapter *adapter)
+{
+ __le32 val;
+ __le32 flash_fw_ver;
+ u32 file_fw_ver, min_ver, bios;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+ u32 crbinit_fix_fw;
+
+ if (fw_type == NX_UNIFIED_ROMIMAGE) {
+ if (netxen_nic_validate_unified_romimage(adapter))
+ return -EINVAL;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != NETXEN_BDINFO_MAGIC)
+ return -EINVAL;
+
+ if (fw->size < NX_FW_MIN_SIZE)
+ return -EINVAL;
+ }
+
+ val = nx_get_fw_version(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ min_ver = NETXEN_MIN_P3_FW_SUPP;
+ else
+ min_ver = NETXEN_VERSION_CODE(3, 4, 216);
+
+ file_fw_ver = NETXEN_DECODE_VERSION(val);
+
+ if ((_major(file_fw_ver) > _NETXEN_NIC_LINUX_MAJOR) ||
+ (file_fw_ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(file_fw_ver), _minor(file_fw_ver),
+ _build(file_fw_ver));
+ return -EINVAL;
+ }
+ val = nx_get_bios_version(adapter);
+ if (netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios))
+ return -EIO;
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ if (netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flash_fw_ver)) {
+ dev_err(&pdev->dev, "Unable to read flash fw version\n");
+ return -EIO;
+ }
+ flash_fw_ver = NETXEN_DECODE_VERSION(flash_fw_ver);
+
+ /* New fw from file is not allowed, if fw on flash is < 4.0.554 */
+ crbinit_fix_fw = NETXEN_VERSION_CODE(4, 0, 554);
+ if (file_fw_ver >= crbinit_fix_fw && flash_fw_ver < crbinit_fix_fw &&
+ NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ dev_err(&pdev->dev, "Incompatibility detected between driver "
+ "and firmware version on flash. This configuration "
+ "is not recommended. Please update the firmware on "
+ "flash immediately\n");
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer only for no-mn and P2 case*/
+ if (!netxen_p3_has_mn(adapter) ||
+ NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ if (flash_fw_ver > file_fw_ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+ }
+
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+nx_get_next_fwtype(struct netxen_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case NX_UNKNOWN_ROMIMAGE:
+ fw_type = NX_UNIFIED_ROMIMAGE;
+ break;
+
+ case NX_UNIFIED_ROMIMAGE:
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
+ fw_type = NX_FLASH_ROMIMAGE;
+ else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ fw_type = NX_P2_MN_ROMIMAGE;
+ else if (netxen_p3_has_mn(adapter))
+ fw_type = NX_P3_MN_ROMIMAGE;
+ else
+ fw_type = NX_P3_CT_ROMIMAGE;
+ break;
+
+ case NX_P3_MN_ROMIMAGE:
+ fw_type = NX_P3_CT_ROMIMAGE;
+ break;
+
+ case NX_P2_MN_ROMIMAGE:
+ case NX_P3_CT_ROMIMAGE:
+ default:
+ fw_type = NX_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
+
+static int
+netxen_p3_has_mn(struct netxen_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ /* NX2031 always had MN */
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 1;
+
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(4, 0, 220)) {
+
+ capability = NXRD32(adapter, NX_PEG_TUNE_CAPABILITY);
+ if (capability & NX_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+void netxen_request_firmware(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc = 0;
+
+ adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
+
+next:
+ nx_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = netxen_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+
+void
+netxen_release_firmware(struct netxen_adapter *adapter)
+{
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int netxen_init_dummy_dma(struct netxen_adapter *adapter)
+{
+ u64 addr;
+ u32 hi, lo;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ adapter->dummy_dma.addr = dma_alloc_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ &adapter->dummy_dma.phys_addr,
+ GFP_KERNEL);
+ if (adapter->dummy_dma.addr == NULL) {
+ dev_err(&adapter->pdev->dev,
+ "ERROR: Could not allocate dummy DMA memory\n");
+ return -ENOMEM;
+ }
+
+ addr = (uint64_t) adapter->dummy_dma.phys_addr;
+ hi = (addr >> 32) & 0xffffffff;
+ lo = addr & 0xffffffff;
+
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI, hi);
+ NXWR32(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO, lo);
+
+ return 0;
+}
+
+/*
+ * NetXen DMA watchdog control:
+ *
+ * Bit 0 : enabled => R/O: 1 watchdog active, 0 inactive
+ * Bit 1 : disable_request => 1 req disable dma watchdog
+ * Bit 2 : enable_request => 1 req enable dma watchdog
+ * Bit 3-31 : unused
+ */
+void netxen_free_dummy_dma(struct netxen_adapter *adapter)
+{
+ int i = 100;
+ u32 ctrl;
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return;
+
+ if (!adapter->dummy_dma.addr)
+ return;
+
+ ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+ if ((ctrl & 0x1) != 0) {
+ NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, (ctrl | 0x2));
+
+ while ((ctrl & 0x1) != 0) {
+
+ msleep(50);
+
+ ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+
+ if (--i == 0)
+ break;
+ }
+ }
+
+ if (i) {
+ dma_free_coherent(&adapter->pdev->dev,
+ NETXEN_HOST_DUMMY_DMA_SIZE,
+ adapter->dummy_dma.addr,
+ adapter->dummy_dma.phys_addr);
+ adapter->dummy_dma.addr = NULL;
+ } else
+ dev_err(&adapter->pdev->dev, "dma_watchdog_shutdown failed\n");
+}
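+
+/*
+ * For reference only (a sketch based on the bit layout documented above
+ * netxen_free_dummy_dma, not code used by this driver): re-arming the DMA
+ * watchdog would set the enable_request bit (bit 2) rather than the
+ * disable_request bit:
+ *
+ *   ctrl = NXRD32(adapter, NETXEN_DMA_WATCHDOG_CTRL);
+ *   if (!(ctrl & 0x1))
+ *           NXWR32(adapter, NETXEN_DMA_WATCHDOG_CTRL, ctrl | 0x4);
+ */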
+
+int netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val)
+{
+ u32 val = 0;
+ int retries = 60;
+
+ if (pegtune_val)
+ return 0;
+
+ do {
+ val = NXRD32(adapter, CRB_CMDPEG_STATE);
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_warn(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+netxen_receive_peg_ready(struct netxen_adapter *adapter)
+{
+ u32 val = 0;
+ int retries = 2000;
+
+ do {
+ val = NXRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ pr_err("Receive Peg initialization not complete, state: 0x%x.\n", val);
+ return -EIO;
+}
+
+int netxen_init_firmware(struct netxen_adapter *adapter)
+{
+ int err;
+
+ err = netxen_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+
+ return err;
+}
+
+static void
+netxen_handle_linkevent(struct netxen_adapter *adapter, nx_fw_msg_t *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE) {
+ printk(KERN_INFO "%s: unsupported cable: OUI 0x%x, length %d\n",
+ netdev->name, cable_OUI, cable_len);
+ } else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN) {
+ printk(KERN_INFO "%s: unsupported cable length %d\n",
+ netdev->name, cable_len);
+ }
+
+ /* update link parameters */
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+
+ netxen_advert_link_change(adapter, link_status);
+}
+
+static void
+netxen_handle_fw_message(int desc_cnt, int index,
+ struct nx_host_sds_ring *sds_ring)
+{
+ nx_fw_msg_t msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = netxen_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case NX_NIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ netxen_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+netxen_alloc_rx_skb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring,
+ struct netxen_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+ if (!buffer->skb)
+ return 1;
+
+ skb = buffer->skb;
+
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&pdev->dev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return 1;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = NETXEN_BUFFER_BUSY;
+
+ return 0;
+}
+
+static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ dma_unmap_single(&adapter->pdev->dev, buffer->dma, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
+ && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = NETXEN_BUFFER_FREE;
+ return skb;
+}
+
+static struct netxen_rx_buffer *
+netxen_process_rcv(struct netxen_adapter *adapter,
+ struct nx_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct nx_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = netxen_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = netxen_get_sts_totallength(sts_data0);
+ cksum = netxen_get_sts_status(sts_data0);
+ pkt_offset = netxen_get_sts_pkt_offset(sts_data0);
+
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define TCP_HDR_SIZE 20
+#define TCP_TS_OPTION_SIZE 12
+#define TCP_TS_HDR_SIZE (TCP_HDR_SIZE + TCP_TS_OPTION_SIZE)
+
+static struct netxen_rx_buffer *
+netxen_process_lro(struct netxen_adapter *adapter,
+ struct nx_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct netxen_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct nx_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+ u8 vhdr_len = 0;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = netxen_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = netxen_get_lro_sts_timestamp(sts_data0);
+ lro_length = netxen_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = netxen_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = netxen_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = netxen_get_lro_sts_push_flag(sts_data0);
+ seq_number = netxen_get_lro_sts_seq_number(sts_data1);
+
+ skb = netxen_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ vhdr_len = VLAN_HLEN;
+ iph = (struct iphdr *)(skb->data + vhdr_len);
+ th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ if (adapter->flags & NETXEN_FW_MSS_CAP)
+ skb_shinfo(skb)->gso_size = netxen_get_lro_sts_mss(sts_data1);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define netxen_merge_rx_buffers(list, head) \
+ do { list_splice_tail_init(list, head); } while (0);
+
+int
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ struct list_head *cur;
+
+ struct status_desc *desc;
+ struct netxen_rx_buffer *rxbuf;
+
+ u32 consumer = sds_ring->consumer;
+
+ int count = 0;
+ u64 sts_data0, sts_data1;
+ int opcode, ring = 0, desc_cnt;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = netxen_get_sts_desc_cnt(sts_data0);
+
+ opcode = netxen_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case NETXEN_NIC_RXPKT_DESC:
+ case NETXEN_OLD_RXPKT_DESC:
+ case NETXEN_NIC_SYN_OFFLOAD:
+ ring = netxen_get_sts_type(sts_data0);
+ rxbuf = netxen_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case NETXEN_NIC_LRO_DESC:
+ ring = netxen_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = netxen_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case NETXEN_NIC_RESPONSE_DESC:
+ netxen_handle_fw_message(desc_cnt, consumer, sds_ring);
+ goto skip;
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct nx_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct netxen_rx_buffer, list);
+ netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ netxen_merge_rx_buffers(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ netxen_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ NXWRIO(adapter, sds_ring->crb_sts_consumer, consumer);
+ }
+
+ return count;
+}
+
+/* Process Command status ring */
+int netxen_process_cmd_ring(struct netxen_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct netxen_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_skb_frag *frag;
+ int done = 0;
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock_bh(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+ DMA_TO_DEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++; /* Get the next frag */
+ dma_unmap_page(&pdev->dev, frag->dma,
+ frag->length, DMA_TO_DEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ tx_ring->sw_consumer = sw_consumer;
+
+ if (count && netif_running(netdev)) {
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ }
+ /*
+ * If everything is freed up to consumer then check if the ring is full
+ * If the ring is full then check if more needs to be freed and
+ * schedule the call back again.
+ *
+ * This happens when there are 2 CPUs. One could be freeing and the
+ * other filling it. If the ring is full when we get out of here and
+ * the card has already interrupted the host then the host can miss the
+ * interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock_bh(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+void
+netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+ struct nx_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int producer, count = 0;
+ netxen_ctx_msg msg = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ NXWRIO(adapter, rds_ring->crb_rcv_producer,
+ (producer-1) & (rds_ring->num_desc-1));
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /*
+ * Write a doorbell msg to tell phanmon of change in
+ * receive ring producer
+ * Only for firmware version < 4.0.0
+ */
+ netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
+ netxen_set_msg_privid(msg);
+ netxen_set_msg_count(msg,
+ ((producer - 1) &
+ (rds_ring->num_desc - 1)));
+ netxen_set_msg_ctxid(msg, adapter->portnum);
+ netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
+ NXWRIO(adapter, DB_NORMALIZE(adapter,
+ NETXEN_RCV_PRODUCER_OFFSET), msg);
+ }
+ }
+}
+
+static void
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+ struct nx_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct netxen_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct netxen_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ NXWRIO(adapter, rds_ring->crb_rcv_producer,
+ (producer - 1) & (rds_ring->num_desc - 1));
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+void netxen_nic_clear_stats(struct netxen_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
new file mode 100644
index 000000000..c005a9df5
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -0,0 +1,3482 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 - 2009 NetXen, Inc.
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include "netxen_nic_hw.h"
+
+#include "netxen_nic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+
+MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
+
+char netxen_nic_driver_name[] = "netxen_nic";
+static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
+ NETXEN_NIC_LINUX_VERSIONID;
+
+static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+
+static int use_msi_x = 1;
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset,"Auto firmware reset (0=disabled, 1=enabled");
+
+static int netxen_nic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void netxen_nic_remove(struct pci_dev *pdev);
+static int netxen_nic_open(struct net_device *netdev);
+static int netxen_nic_close(struct net_device *netdev);
+static netdev_tx_t netxen_nic_xmit_frame(struct sk_buff *,
+ struct net_device *);
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+static void netxen_tx_timeout_task(struct work_struct *work);
+static void netxen_fw_poll_work(struct work_struct *work);
+static void netxen_schedule_work(struct netxen_adapter *adapter,
+ work_func_t func, int delay);
+static void netxen_cancel_fw_work(struct netxen_adapter *adapter);
+static int netxen_nic_poll(struct napi_struct *napi, int budget);
+
+static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
+static void netxen_create_diag_entries(struct netxen_adapter *adapter);
+static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
+static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
+static int netxen_can_start_firmware(struct netxen_adapter *adapter);
+
+static irqreturn_t netxen_intr(int irq, void *data);
+static irqreturn_t netxen_msi_intr(int irq, void *data);
+static irqreturn_t netxen_msix_intr(int irq, void *data);
+
+static void netxen_free_ip_list(struct netxen_adapter *, bool);
+static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
+static void netxen_nic_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
+static int netxen_nic_set_mac(struct net_device *netdev, void *p);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id netxen_pci_tbl[] = {
+ ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
+ ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
+ ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
+ ENTRY(PCI_DEVICE_ID_NX2031_IMEZ),
+ ENTRY(PCI_DEVICE_ID_NX2031_HMEZ),
+ ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
+ ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
+ ENTRY(PCI_DEVICE_ID_NX3031),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
+
+static uint32_t crb_cmd_producer[4] = {
+ CRB_CMD_PRODUCER_OFFSET, CRB_CMD_PRODUCER_OFFSET_1,
+ CRB_CMD_PRODUCER_OFFSET_2, CRB_CMD_PRODUCER_OFFSET_3
+};
+
+void
+netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring)
+{
+ NXWRIO(adapter, tx_ring->crb_cmd_producer, tx_ring->producer);
+}
+
+static uint32_t crb_cmd_consumer[4] = {
+ CRB_CMD_CONSUMER_OFFSET, CRB_CMD_CONSUMER_OFFSET_1,
+ CRB_CMD_CONSUMER_OFFSET_2, CRB_CMD_CONSUMER_OFFSET_3
+};
+
+static inline void
+netxen_nic_update_cmd_consumer(struct netxen_adapter *adapter,
+ struct nx_host_tx_ring *tx_ring)
+{
+ NXWRIO(adapter, tx_ring->crb_cmd_consumer, tx_ring->sw_consumer);
+}
+
+static uint32_t msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
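+/* Per-PCI-function legacy (INTx) interrupt register layout */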
+static struct netxen_legacy_intr_set legacy_intr[] = NX_LEGACY_INTR_CONFIG;
+
+static inline void netxen_nic_disable_int(struct nx_host_sds_ring *sds_ring)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ NXWRIO(adapter, sds_ring->crb_intr_mask, 0);
+}
+
+static inline void netxen_nic_enable_int(struct nx_host_sds_ring *sds_ring)
+{
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ NXWRIO(adapter, sds_ring->crb_intr_mask, 0x1);
+
+ if (!NETXEN_IS_MSI_FAMILY(adapter))
+ NXWRIO(adapter, adapter->tgt_mask_reg, 0xfbff);
+}
+
+static int
+netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct nx_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return recv_ctx->sds_rings == NULL;
+}
+
+static void
+netxen_free_sds_rings(struct netxen_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (netxen_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi, netxen_nic_poll);
+ }
+
+ return 0;
+}
+
+static void
+netxen_napi_del(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ netxen_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+netxen_napi_enable(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ netxen_nic_enable_int(sds_ring);
+ }
+}
+
+static void
+netxen_napi_disable(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
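+ /* Mask each ring's interrupt before quiescing and disabling its NAPI context */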
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netxen_nic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
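+/*
+ * P2 (NX2031) parts are limited to 35-bit DMA addressing (32-bit on IA64)
+ * and keep a 32-bit coherent mask; P3 parts support 39 bits for both masks.
+ */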
+static int nx_set_dma_mask(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ uint64_t mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
+ mask = DMA_BIT_MASK(32);
+ cmask = DMA_BIT_MASK(32);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+#ifndef CONFIG_IA64
+ mask = DMA_BIT_MASK(35);
+#endif
+ } else {
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+ }
+
+ if (dma_set_mask(&pdev->dev, mask) == 0 &&
+ dma_set_coherent_mask(&pdev->dev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+nx_update_dma_mask(struct netxen_adapter *adapter)
+{
+ int change, shift, err;
+ uint64_t mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
+ shift = NXRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) && (shift > 9))
+ change = 1;
+ else if ((adapter->ahw.revision_id == NX_P2_C1) && (shift <= 4))
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32 + shift);
+
+ err = dma_set_mask(&pdev->dev, mask);
+ if (err)
+ goto err_out;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ err = dma_set_coherent_mask(&pdev->dev, mask);
+ if (err)
+ goto err_out;
+ }
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ dma_set_mask(&pdev->dev, old_mask);
+ dma_set_coherent_mask(&pdev->dev, old_cmask);
+ return err;
+}
+
+static int
+netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
+{
+ u32 val, timeout;
+
+ if (first_boot == 0x55555555) {
+ /* This is the first boot after power up */
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), NETXEN_BDINFO_MAGIC);
+
+ if (!NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ /* PCI bus master workaround */
+ first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ if (!(first_boot & 0x4)) {
+ first_boot |= 0x4;
+ NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ }
+
+ /* Verify the global SW reset state after power up */
+ first_boot = NXRD32(adapter, NETXEN_ROMUSB_GLB_SW_RESET);
+ if (first_boot != 0x80000f) {
+ /* clear the register for future unloads/loads */
+ NXWR32(adapter, NETXEN_CAM_RAM(0x1fc), 0);
+ return -EIO;
+ }
+
+ /* Start P2 boot loader */
+ val = NXRD32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE);
+ NXWR32(adapter, NETXEN_ROMUSB_GLB_PEGTUNE_DONE, val | 0x1);
+ timeout = 0;
+ do {
+ msleep(1);
+ val = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+ if (++timeout > 5000)
+ return -EIO;
+
+ } while (val == NETXEN_BDINFO_MAGIC);
+ }
+ return 0;
+}
+
+static void netxen_set_port_mode(struct netxen_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == NETXEN_BRDTYPE_P3_HMEZ) ||
+ (val == NETXEN_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == NETXEN_PORT_MODE_802_3_AP) {
+ data = NETXEN_PORT_MODE_802_3_AP;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_XG) {
+ data = NETXEN_PORT_MODE_XG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_1G) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_1G;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else if (port_mode == NETXEN_PORT_MODE_AUTO_NEG_XG) {
+ data = NETXEN_PORT_MODE_AUTO_NEG_XG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ } else {
+ data = NETXEN_PORT_MODE_AUTO_NEG;
+ NXWR32(adapter, NETXEN_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != NETXEN_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != NETXEN_PORT_MODE_XG) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != NETXEN_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = NETXEN_PORT_MODE_AUTO_NEG;
+ }
+ NXWR32(adapter, NETXEN_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
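+/* 0x10 is the standard PCI Express capability ID (PCI_CAP_ID_EXP) */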
+#define PCI_CAP_ID_GEN 0x10
+
+static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
+{
+ u32 pdevfuncsave;
+ u32 c8c9value = 0;
+ u32 chicken = 0;
+ u32 control = 0;
+ int i, pos;
+ struct pci_dev *pdev;
+
+ pdev = adapter->pdev;
+
+ chicken = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3));
+ /* clear chicken3.25:24 */
+ chicken &= 0xFCFFFFFF;
+ /*
+ * Gen2 strapping: program 0xF1000.
+ * Gen1 strapping: program 0xF1020 on P3 B0, otherwise do nothing.
+ */
+ pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
+ if (pos == 0xC0) {
+ pci_read_config_dword(pdev, pos + 0x10, &control);
+ if ((control & 0x000F0000) != 0x00020000) {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ }
+ dev_info(&adapter->pdev->dev, "Gen2 strapping detected\n");
+ c8c9value = 0xF1000;
+ } else {
+ /* set chicken3.24 if gen1 */
+ chicken |= 0x01000000;
+ dev_info(&adapter->pdev->dev, "Gen1 strapping detected\n");
+ if (adapter->ahw.revision_id == NX_P3_B0)
+ c8c9value = 0xF1020;
+ else
+ c8c9value = 0;
+ }
+
+ NXWR32(adapter, NETXEN_PCIE_REG(PCIE_CHICKEN3), chicken);
+
+ if (!c8c9value)
+ return;
+
+ pdevfuncsave = pdev->devfn;
+ if (pdevfuncsave & 0x07)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_read_config_dword(pdev, pos + 8, &control);
+ pci_write_config_dword(pdev, pos + 8, c8c9value);
+ pdev->devfn++;
+ }
+ pdev->devfn = pdevfuncsave;
+}
+
+static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+
+ if (pdev->msix_cap) {
+ pci_read_config_dword(pdev, pdev->msix_cap, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pdev->msix_cap, control);
+ }
+}
+
+static void netxen_init_msix_entries(struct netxen_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+netxen_read_mac_addr(struct netxen_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u8 addr[ETH_ALEN];
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ } else {
+ if (netxen_get_flash_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+ }
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ addr[i] = *(p + 5 - i);
+ eth_hw_addr_set(netdev, addr);
+
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* warn (but do not fail) if the station address is invalid */
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n", netdev->dev_addr);
+
+ return 0;
+}
+
+static int netxen_nic_set_mac(struct net_device *netdev, void *p)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ netxen_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
+ adapter->macaddr_set(adapter, addr->sa_data);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ netxen_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static void netxen_set_multicast_list(struct net_device *dev)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ adapter->set_multi(dev);
+}
+
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_RXCSUM)) {
+ netdev_info(dev, "disabling LRO as RXCSUM is off\n");
+
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+static int netxen_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int hw_lro;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
+ : NETXEN_NIC_LRO_DISABLED;
+
+ if (netxen_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct net_device_ops netxen_netdev_ops = {
+ .ndo_open = netxen_nic_open,
+ .ndo_stop = netxen_nic_close,
+ .ndo_start_xmit = netxen_nic_xmit_frame,
+ .ndo_get_stats64 = netxen_nic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = netxen_set_multicast_list,
+ .ndo_set_mac_address = netxen_nic_set_mac,
+ .ndo_change_mtu = netxen_nic_change_mtu,
+ .ndo_tx_timeout = netxen_tx_timeout,
+ .ndo_fix_features = netxen_fix_features,
+ .ndo_set_features = netxen_set_features,
+};
+
+static inline void netxen_set_interrupt_mode(struct netxen_adapter *adapter,
+ u32 mode)
+{
+ NXWR32(adapter, NETXEN_INTR_MODE_REG, mode);
+}
+
+static inline u32 netxen_get_interrupt_mode(struct netxen_adapter *adapter)
+{
+ return NXRD32(adapter, NETXEN_INTR_MODE_REG);
+}
+
+static void
+netxen_initialize_interrupt_registers(struct netxen_adapter *adapter)
+{
+ struct netxen_legacy_intr_set *legacy_intrp;
+ u32 tgt_status_reg, int_state_reg;
+
+ if (adapter->ahw.revision_id >= NX_P3_B0)
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+ else
+ legacy_intrp = &legacy_intr[0];
+
+ tgt_status_reg = legacy_intrp->tgt_status_reg;
+ int_state_reg = ISR_INT_STATE_REG;
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter, tgt_status_reg);
+ adapter->tgt_mask_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->pci_int_reg = netxen_get_ioaddr(adapter,
+ legacy_intrp->pci_int_reg);
+ adapter->isr_int_vec = netxen_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ if (adapter->ahw.revision_id >= NX_P3_B1)
+ adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+ int_state_reg);
+ else
+ adapter->crb_int_state_reg = netxen_get_ioaddr(adapter,
+ CRB_INT_VECTOR);
+}
+
+static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
+ int num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u32 value;
+ int err;
+
+ if (adapter->msix_supported) {
+ netxen_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
+ adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
+ netxen_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return 0;
+ }
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ value = msi_tgt_status[adapter->ahw.pci_func];
+ adapter->flags |= NETXEN_NIC_MSI_ENABLED;
+ adapter->tgt_status_reg = netxen_get_ioaddr(adapter, value);
+ adapter->msix_entries[0].vector = pdev->irq;
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to acquire MSI-X/MSI interrupt vector\n");
+ return -EIO;
+}
+
+static int netxen_setup_intr(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int num_msix;
+
+ if (adapter->rss_supported)
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+ adapter->flags &= ~(NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED);
+
+ netxen_initialize_interrupt_registers(adapter);
+ netxen_set_msix_bit(pdev, 0);
+
+ if (adapter->portnum == 0) {
+ if (!netxen_setup_msi_interrupts(adapter, num_msix))
+ netxen_set_interrupt_mode(adapter, NETXEN_MSI_MODE);
+ else
+ netxen_set_interrupt_mode(adapter, NETXEN_INTX_MODE);
+ } else {
+ if (netxen_get_interrupt_mode(adapter) == NETXEN_MSI_MODE &&
+ netxen_setup_msi_interrupts(adapter, num_msix)) {
+ dev_err(&pdev->dev, "Co-existence of MSI-X/MSI and INTx interrupts is not supported\n");
+ return -EIO;
+ }
+ }
+
+ if (!NETXEN_IS_MSI_FAMILY(adapter)) {
+ adapter->msix_entries[0].vector = pdev->irq;
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ }
+ return 0;
+}
+
+static void
+netxen_teardown_intr(struct netxen_adapter *adapter)
+{
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+netxen_cleanup_pci_map(struct netxen_adapter *adapter)
+{
+ if (adapter->ahw.db_base != NULL)
+ iounmap(adapter->ahw.db_base);
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+ if (adapter->ahw.pci_base1 != NULL)
+ iounmap(adapter->ahw.pci_base1);
+ if (adapter->ahw.pci_base2 != NULL)
+ iounmap(adapter->ahw.pci_base2);
+}
+
+static int
+netxen_setup_pci_map(struct netxen_adapter *adapter)
+{
+ void __iomem *db_ptr = NULL;
+
+ resource_size_t mem_base, db_base;
+ unsigned long mem_len, db_len = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+ struct netxen_hardware_context *ahw = &adapter->ahw;
+
+ int err = 0;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ /* 128 Meg of memory */
+ if (mem_len == NETXEN_PCI_128MB_SIZE) {
+
+ ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+ ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+ SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+ THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+ ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
+ } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
+
+ ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+ SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
+
+ ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+ if (ahw->pci_base0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ ahw->pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ netxen_setup_hwops(adapter);
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
+ adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+ NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
+ NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ goto skip_doorbell;
+
+ db_base = pci_resource_start(pdev, 4); /* doorbell is on bar 4 */
+ db_len = pci_resource_len(pdev, 4);
+
+ if (db_len == 0) {
+ printk(KERN_ERR "%s: doorbell is disabled\n",
+ netxen_nic_driver_name);
+ err = -EIO;
+ goto err_out;
+ }
+
+ db_ptr = ioremap(db_base, NETXEN_DB_MAPSIZE_BYTES);
+ if (!db_ptr) {
+ printk(KERN_ERR "%s: Failed to allocate doorbell map.",
+ netxen_nic_driver_name);
+ err = -EIO;
+ goto err_out;
+ }
+
+skip_doorbell:
+ adapter->ahw.db_base = db_ptr;
+ adapter->ahw.db_len = db_len;
+ return 0;
+
+err_out:
+ netxen_cleanup_pci_map(adapter);
+ return err;
+}
+
+static void
+netxen_check_options(struct netxen_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
+ char brd_name[NETXEN_MAX_SHORT_NAME];
+ char serial_num[32];
+ int i, offset, val, err;
+ __le32 *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (__le32 *)&serial_num;
+ offset = NX_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ err = netxen_rom_fast_read(adapter, offset, &val);
+ if (err) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
+ fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
+ fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
+ prev_fw_version = adapter->fw_version;
+ adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ /* Get FW Mini Coredump template and store it */
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (adapter->mdump.md_template == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+ err = netxen_setup_minidump(adapter);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to setup minidump rcode = %d\n", err);
+ }
+ }
+
+ if (adapter->portnum == 0) {
+ if (netxen_nic_get_brd_name_by_type(adapter->ahw.board_type,
+ brd_name))
+ strcpy(serial_num, "Unknown");
+
+ pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, serial_num, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < NETXEN_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ i = NXRD32(adapter, NETXEN_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+ }
+
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d [%s]\n",
+ NETXEN_NIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ } else {
+ u32 flashed_ver = 0;
+ netxen_rom_fast_read(adapter,
+ NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G:
+ case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ adapter->num_lro_rxd = MAX_LRO_RCV_DESCRIPTORS;
+ adapter->max_rds_rings = 3;
+ } else {
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+ }
+}
+
+static int
+netxen_start_firmware(struct netxen_adapter *adapter)
+{
+ int val, err, first_boot;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* required for NX2031 dummy dma */
+ err = nx_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ err = netxen_can_start_firmware(adapter);
+
+ if (err < 0)
+ return err;
+
+ if (!err)
+ goto wait_init;
+
+ first_boot = NXRD32(adapter, NETXEN_CAM_RAM(0x1fc));
+
+ err = netxen_check_hw_init(adapter, first_boot);
+ if (err) {
+ dev_err(&pdev->dev, "error in init HW init sequence\n");
+ return err;
+ }
+
+ netxen_request_firmware(adapter);
+
+ err = netxen_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto pcie_strap_init;
+
+ if (first_boot != 0x55555555) {
+ NXWR32(adapter, CRB_CMDPEG_STATE, 0);
+ netxen_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ NXWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ NXWR32(adapter, NETXEN_PEG_HALT_STATUS1, 0);
+ NXWR32(adapter, NETXEN_PEG_HALT_STATUS2, 0);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_set_port_mode(adapter);
+
+ err = netxen_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ netxen_release_firmware(adapter);
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+
+ /* Initialize multicast addr pool owners */
+ val = 0x7654;
+ if (adapter->ahw.port_type == NETXEN_NIC_XGBE)
+ val |= 0x0f000000;
+ NXWR32(adapter, NETXEN_MAC_ADDR_CNTL_REG, val);
+
+ }
+
+ err = netxen_init_dummy_dma(adapter);
+ if (err)
+ goto err_out;
+
+ /*
+ * Tell the hardware our version number.
+ */
+ val = (_NETXEN_NIC_LINUX_MAJOR << 16)
+ | (_NETXEN_NIC_LINUX_MINOR << 8)
+ | (_NETXEN_NIC_LINUX_SUBVERSION);
+ NXWR32(adapter, CRB_DRIVER_VERSION, val);
+
+pcie_strap_init:
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_pcie_strap_init(adapter);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+ if (err) {
+ netxen_free_dummy_dma(adapter);
+ goto err_out;
+ }
+
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_READY);
+
+ nx_update_dma_mask(adapter);
+
+ netxen_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ netxen_release_firmware(adapter);
+ return err;
+}
+
+static int
+netxen_nic_request_irq(struct netxen_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct nx_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
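+ /* Pick the ISR that matches the interrupt mode chosen in netxen_setup_intr() */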
+ if (adapter->flags & NETXEN_NIC_MSIX_ENABLED)
+ handler = netxen_msix_intr;
+ else if (adapter->flags & NETXEN_NIC_MSI_ENABLED)
+ handler = netxen_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = netxen_intr;
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+netxen_nic_free_irq(struct netxen_adapter *adapter)
+{
+ int ring;
+ struct nx_host_sds_ring *sds_ring;
+
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+netxen_nic_init_coalesce_defaults(struct netxen_adapter *adapter)
+{
+ adapter->coal.flags = NETXEN_NIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ NETXEN_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ NETXEN_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+/* with rtnl_lock */
+static int
+__netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ err = adapter->init_port(adapter, adapter->physical_port);
+ if (err) {
+ printk(KERN_ERR "%s: Failed to initialize port %d\n",
+ netxen_nic_driver_name, adapter->portnum);
+ return err;
+ }
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ adapter->macaddr_set(adapter, adapter->mac_addr);
+
+ adapter->set_multi(netdev);
+ adapter->set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ netxen_config_rss(adapter, 1);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_config_intr_coalesce(adapter);
+
+ if (netdev->features & NETIF_F_LRO)
+ netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
+
+ netxen_napi_enable(adapter);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 1);
+ else
+ netxen_nic_set_link_parameters(adapter);
+
+ set_bit(__NX_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Usage: During resume and firmware recovery module.*/
+
+static inline int
+netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __netxen_nic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+/* with rtnl_lock */
+static void
+__netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__NX_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)
+ netxen_linkevent_request(adapter, 0);
+
+ if (adapter->stop_port)
+ adapter->stop_port(adapter);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_p3_free_mac_list(adapter);
+
+ adapter->set_promisc(adapter, NETXEN_NIU_NON_PROMISC_MODE);
+
+ netxen_napi_disable(adapter);
+
+ netxen_release_tx_buffers(adapter);
+}
+
+/* Usage: During suspend and firmware recovery module */
+
+static inline void
+netxen_nic_down(struct netxen_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
+static int
+netxen_nic_attach(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct nx_host_rds_ring *rds_ring;
+ struct nx_host_tx_ring *tx_ring;
+ u32 capab2;
+
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = netxen_init_firmware(adapter);
+ if (err)
+ return err;
+
+ adapter->flags &= ~NETXEN_FW_MSS_CAP;
+ if (adapter->capabilities & NX_FW_CAPABILITY_MORE_CAPS) {
+ capab2 = NXRD32(adapter, CRB_FW_CAPABILITIES_2);
+ if (capab2 & NX_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= NETXEN_FW_MSS_CAP;
+ }
+
+ err = netxen_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = netxen_alloc_sw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting sw resources\n",
+ netdev->name);
+ return err;
+ }
+
+ err = netxen_alloc_hw_resources(adapter);
+ if (err) {
+ printk(KERN_ERR "%s: Error in setting hw resources\n",
+ netdev->name);
+ goto err_out_free_sw;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ tx_ring = adapter->tx_ring;
+ tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
+ crb_cmd_producer[adapter->portnum]);
+ tx_ring->crb_cmd_consumer = netxen_get_ioaddr(adapter,
+ crb_cmd_consumer[adapter->portnum]);
+
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+ netxen_nic_update_cmd_consumer(adapter, tx_ring);
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ netxen_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = netxen_nic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "%s: failed to setup interrupt\n",
+ netdev->name);
+ goto err_out_free_rxbuf;
+ }
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netxen_nic_init_coalesce_defaults(adapter);
+
+ netxen_create_sysfs_entries(adapter);
+
+ adapter->is_up = NETXEN_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ netxen_release_rx_buffers(adapter);
+ netxen_free_hw_resources(adapter);
+err_out_free_sw:
+ netxen_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+netxen_nic_detach(struct netxen_adapter *adapter)
+{
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ netxen_remove_sysfs_entries(adapter);
+
+ netxen_free_hw_resources(adapter);
+ netxen_release_rx_buffers(adapter);
+ netxen_nic_free_irq(adapter);
+ netxen_napi_del(adapter);
+ netxen_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+int
+netxen_nic_reset_context(struct netxen_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == NETXEN_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __netxen_nic_down(adapter, netdev);
+
+ netxen_nic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (!err)
+ err = __netxen_nic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__NX_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+netxen_setup_netdev(struct netxen_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err = 0;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->mc_enabled = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ adapter->max_mc_count = 38;
+ else
+ adapter->max_mc_count = 16;
+
+ netdev->netdev_ops = &netxen_netdev_ops;
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netxen_nic_change_mtu(netdev, netdev->mtu);
+
+ netdev->ethtool_ops = &netxen_nic_ethtool_ops;
+
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ netdev->vlan_features |= netdev->hw_features;
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->features |= netdev->hw_features;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, netxen_tx_timeout_task);
+
+ if (netxen_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+#define NETXEN_ULA_ADAPTER_KEY (0xdaddad01)
+#define NETXEN_NON_ULA_ADAPTER_KEY (0xdaddad00)
+
+static void netxen_read_ula_info(struct netxen_adapter *adapter)
+{
+ u32 temp;
+
+ /* Print ULA info only once for an adapter */
+ if (adapter->portnum != 0)
+ return;
+
+ temp = NXRD32(adapter, NETXEN_ULA_KEY);
+ switch (temp) {
+ case NETXEN_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "ULA adapter");
+ break;
+ case NETXEN_NON_ULA_ADAPTER_KEY:
+ dev_info(&adapter->pdev->dev, "non ULA adapter");
+ break;
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_PCIEAER
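+/*
+ * Mask correctable AER errors at the root port for specific P3 boards,
+ * presumably to silence benign correctable-error reports from these parts.
+ */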
+static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct pci_dev *root = pdev->bus->self;
+ u32 aer_pos;
+
+ /* root bus? */
+ if (!root)
+ return;
+
+ if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
+ adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
+ return;
+
+ if (pci_pcie_type(root) != PCI_EXP_TYPE_ROOT_PORT)
+ return;
+
+ aer_pos = pci_find_ext_capability(root, PCI_EXT_CAP_ID_ERR);
+ if (!aer_pos)
+ return;
+
+ pci_write_config_dword(root, aer_pos + PCI_ERR_COR_MASK, 0xffff);
+}
+#endif
+
+static int
+netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct netxen_adapter *adapter = NULL;
+ int i = 0, err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+ u32 val;
+
+ if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+ pr_warn("%s: chip revisions between 0x%x-0x%x will not be enabled\n",
+ module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
+ return -ENODEV;
+ }
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, netxen_nic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct netxen_adapter));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ spin_lock_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+ INIT_LIST_HEAD(&adapter->ip_list);
+
+ err = netxen_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = netxen_nic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+#ifdef CONFIG_PCIEAER
+ netxen_mask_aer_correctable(adapter);
+#endif
+
+ /* Mezz cards have PCI function 0,2,3 enabled */
+ switch (adapter->ahw.board_type) {
+ case NETXEN_BRDTYPE_P2_SB31_10G_IMEZ:
+ case NETXEN_BRDTYPE_P2_SB31_10G_HMEZ:
+ if (pci_func_id >= 2)
+ adapter->portnum = pci_func_id - 2;
+ break;
+ default:
+ break;
+ }
+
+ err = netxen_check_flash_fw_compatibility(adapter);
+ if (err)
+ goto err_out_iounmap;
+
+ if (adapter->portnum == 0) {
+ val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ if (val != 0xffffffff && val != 0) {
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
+ adapter->need_fw_reset = 1;
+ }
+ }
+
+ err = netxen_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /*
+ * See if the firmware gave us a virtual-physical port mapping.
+ */
+ adapter->physical_port = adapter->portnum;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ i = NXRD32(adapter, CRB_V2P(adapter->portnum));
+ if (i != 0x55555555)
+ adapter->physical_port = i;
+ }
+
+ /* MTU range: 0 - 8000 (P2) or 9600 (P3) */
+ netdev->min_mtu = 0;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netdev->max_mtu = P3_MAX_MTU;
+ else
+ netdev->max_mtu = P2_MAX_MTU;
+
+ netxen_nic_clear_stats(adapter);
+
+ err = netxen_setup_intr(adapter);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to setup interrupts, error = %d\n", err);
+ goto err_out_disable_msi;
+ }
+
+ netxen_read_ula_info(adapter);
+
+ err = netxen_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case NETXEN_NIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case NETXEN_NIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ netxen_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ netxen_teardown_intr(adapter);
+
+ netxen_free_dummy_dma(adapter);
+
+err_out_decr_ref:
+ nx_decr_dev_ref_cnt(adapter);
+
+err_out_iounmap:
+ netxen_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void netxen_cleanup_minidump(struct netxen_adapter *adapter)
+{
+ kfree(adapter->mdump.md_template);
+ adapter->mdump.md_template = NULL;
+
+ if (adapter->mdump.md_capture_buff) {
+ vfree(adapter->mdump.md_capture_buff);
+ adapter->mdump.md_capture_buff = NULL;
+ }
+}
+
+static void netxen_nic_remove(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ netxen_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ netxen_free_ip_list(adapter, false);
+ netxen_nic_detach(adapter);
+
+ nx_decr_dev_ref_cnt(adapter);
+
+ if (adapter->portnum == 0)
+ netxen_free_dummy_dma(adapter);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+
+ netxen_teardown_intr(adapter);
+ netxen_set_interrupt_mode(adapter, 0);
+ netxen_remove_diag_entries(adapter);
+
+ netxen_cleanup_pci_map(adapter);
+
+ netxen_release_firmware(adapter);
+
+ if (NX_IS_REVISION_P3(pdev->revision)) {
+ netxen_cleanup_minidump(adapter);
+ pci_disable_pcie_error_reporting(pdev);
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ free_netdev(netdev);
+}
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ netxen_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ netxen_nic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ netxen_nic_detach(adapter);
+
+ if (adapter->portnum == 0)
+ netxen_free_dummy_dma(adapter);
+
+ nx_decr_dev_ref_cnt(adapter);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int netxen_nic_attach_late_func(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ pci_set_master(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = netxen_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = netxen_nic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ netxen_nic_detach(adapter);
+err_out:
+ nx_decr_dev_ref_cnt(adapter);
+ return err;
+}
+
+static int netxen_nic_attach_func(struct pci_dev *pdev)
+{
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return netxen_nic_attach_late_func(pdev);
+}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+static int __maybe_unused
+netxen_nic_suspend(struct device *dev_d)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev_d);
+
+ netxen_nic_detach_func(adapter);
+
+ if (netxen_nic_wol_supported(adapter))
+ device_wakeup_enable(dev_d);
+
+ return 0;
+}
+
+static int __maybe_unused
+netxen_nic_resume(struct device *dev_d)
+{
+ return netxen_nic_attach_late_func(to_pci_dev(dev_d));
+}
+
+static int netxen_nic_open(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = netxen_nic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __netxen_nic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ netxen_nic_detach(adapter);
+ return err;
+}
+
+/*
+ * netxen_nic_close - close entry point: disables the network interface
+ */
+static int netxen_nic_close(struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ __netxen_nic_down(adapter, netdev);
+ return 0;
+}
+
+static void
+netxen_tso_check(struct net_device *netdev,
+ struct nx_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = skb_vlan_eth_hdr(skb);
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (skb_vlan_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ vid = skb_vlan_tag_get(skb);
+ netxen_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_tcp_all_headers(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ netxen_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+}
+
+static int
+netxen_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct netxen_cmd_buffer *pbuf)
+{
+ struct netxen_skb_frag *nf;
+ skb_frag_t *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = skb_frag_size(frag);
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
+ nf->dma = 0ULL;
+ }
+
+ nf = &pbuf->frag_array[0];
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
+ nf->dma = 0ULL;
+
+out_err:
+ return -ENOMEM;
+}
+
+static inline void
+netxen_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
+static netdev_tx_t
+netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct netxen_cmd_buffer *pbuf;
+ struct netxen_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+ int delta = 0;
+ skb_frag_t *frag;
+
+ u32 producer;
+ int frag_count;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += skb_frag_size(frag);
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+
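+ /*
+ * Stop the queue when descriptors run low; re-check after the memory
+ * barrier in case the Tx completion path freed entries in the meantime.
+ */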
+ if (unlikely(netxen_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_stop_queue(netdev);
+ smp_mb();
+ if (netxen_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_start_queue(netdev);
+ else
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (netxen_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ netxen_clear_cmddesc((u64 *)hwdesc);
+
+ netxen_set_tx_frags_len(first_desc, frag_count, skb->len);
+ netxen_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ netxen_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ netxen_tso_check(netdev, tx_ring, first_desc, skb);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ netxen_nic_update_cmd_producer(adapter, tx_ring);
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int netxen_nic_check_temp(struct netxen_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ uint32_t temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = NXRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = nx_get_temp_state(temp);
+ temp_val = nx_get_temp_val(temp);
+
+ if (temp_state == NX_TEMP_PANIC) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ netdev->name, temp_val);
+ rv = 1;
+ } else if (temp_state == NX_TEMP_WARN) {
+ if (adapter->temp == NX_TEMP_NORMAL) {
+ printk(KERN_ALERT
+ "%s: Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ netdev->name, temp_val);
+ }
+ } else {
+ if (adapter->temp == NX_TEMP_WARN) {
+ printk(KERN_INFO
+ "%s: Device temperature is now %d degrees C"
+ " in normal range.\n", netdev->name,
+ temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is down\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ adapter->link_changed = !adapter->has_link_events;
+ } else if (!adapter->ahw.linkup && linkup) {
+ printk(KERN_INFO "%s: %s NIC Link is up\n",
+ netxen_nic_driver_name, netdev->name);
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ adapter->link_changed = !adapter->has_link_events;
+ }
+}
+
+static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
+{
+ u32 val, port, linkup;
+
+ port = adapter->physical_port;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ linkup = (val == XG_LINK_UP_P3);
+ } else {
+ val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> (port * 8)) & 0xff;
+ linkup = (val == XG_LINK_UP);
+ }
+
+ netxen_advert_link_change(adapter, linkup);
+}
+
+static void netxen_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__NX_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void netxen_tx_timeout_task(struct work_struct *work)
+{
+ struct netxen_adapter *adapter =
+ container_of(work, struct netxen_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= NX_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ rtnl_lock();
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ /* try to scrub interrupt */
+ netxen_napi_disable(adapter);
+
+ netxen_napi_enable(adapter);
+
+ netif_wake_queue(adapter->netdev);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+ } else {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ if (netxen_nic_reset_context(adapter)) {
+ rtnl_unlock();
+ goto request_reset;
+ }
+ }
+ netif_trans_update(adapter->netdev);
+ rtnl_unlock();
+ return;
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static void netxen_nic_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+}
+
+static irqreturn_t netxen_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+ struct netxen_adapter *adapter = sds_ring->adapter;
+ u32 status = 0;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ } else {
+ unsigned long our_int = 0;
+
+ our_int = readl(adapter->crb_int_state_reg);
+
+ /* not our interrupt */
+ if (!test_and_clear_bit((7 + adapter->portnum), &our_int))
+ return IRQ_NONE;
+
+ /* claim interrupt */
+ writel((our_int & 0xffffffff), adapter->crb_int_state_reg);
+
+ /* clear interrupt */
+ netxen_nic_disable_int(sds_ring);
+ }
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msi_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t netxen_msix_intr(int irq, void *data)
+{
+ struct nx_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static int netxen_nic_poll(struct napi_struct *napi, int budget)
+{
+ struct nx_host_sds_ring *sds_ring =
+ container_of(napi, struct nx_host_sds_ring, napi);
+
+ struct netxen_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = netxen_process_cmd_ring(adapter);
+
+ work_done = netxen_process_rcv_ring(sds_ring, budget);
+
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__NX_DEV_UP, &adapter->state))
+ netxen_nic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
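+/*
+ * NX_CRB_DEV_REF_COUNT tracks how many PCI functions have the device in
+ * use; it is only updated while holding the firmware API lock.
+ */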
+static int
+nx_incr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+ int count;
+ if (netxen_api_lock(adapter))
+ return -EIO;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+ netxen_api_unlock(adapter);
+ return count;
+}
+
+static int
+nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
+{
+ int count, state;
+ if (netxen_api_lock(adapter))
+ return -EIO;
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ WARN_ON(count == 0);
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (count == 0 && state != NX_DEV_FAILED)
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+
+ netxen_api_unlock(adapter);
+ return count;
+}
+
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+int
+nx_dev_request_reset(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ adapter->flags |= NETXEN_FW_RESET_OWNER;
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+
+ return ret;
+}
+
+static int
+netxen_can_start_firmware(struct netxen_adapter *adapter)
+{
+ int count;
+ int can_start = 0;
+
+ if (netxen_api_lock(adapter)) {
+ nx_incr_dev_ref_cnt(adapter);
+ return -1;
+ }
+
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+
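+ /* treat an out-of-range (corrupted) reference count as zero */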
+ if ((count < 0) || (count >= NX_MAX_PCI_FUNC))
+ count = 0;
+
+ if (count == 0) {
+ can_start = 1;
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_INITALIZING);
+ }
+
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, ++count);
+
+ netxen_api_unlock(adapter);
+
+ return can_start;
+}
+
+static void
+netxen_schedule_work(struct netxen_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, delay);
+}
+
+static void
+netxen_cancel_fw_work(struct netxen_adapter *adapter)
+{
+ while (test_and_set_bit(__NX_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+netxen_attach_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (netif_running(netdev)) {
+ err = netxen_nic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = netxen_nic_up(adapter, netdev);
+ if (err) {
+ netxen_nic_detach(adapter);
+ goto done;
+ }
+
+ netxen_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__NX_RESETTING, &adapter->state);
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static void
+netxen_fwinit_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ int dev_state;
+ int count;
+ dev_state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (adapter->flags & NETXEN_FW_RESET_OWNER) {
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ WARN_ON(count == 0);
+ if (count == 1) {
+ if (adapter->mdump.md_enabled) {
+ rtnl_lock();
+ netxen_dump_fw(adapter);
+ rtnl_unlock();
+ }
+ adapter->flags &= ~NETXEN_FW_RESET_OWNER;
+ if (netxen_api_lock(adapter)) {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ NXWR32(adapter, NX_CRB_DEV_STATE,
+ NX_DEV_FAILED);
+ return;
+ }
+ count = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ NXWR32(adapter, NX_CRB_DEV_REF_COUNT, --count);
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_COLD);
+ dev_state = NX_DEV_COLD;
+ netxen_api_unlock(adapter);
+ }
+ }
+
+ switch (dev_state) {
+ case NX_DEV_COLD:
+ case NX_DEV_READY:
+ if (!netxen_start_firmware(adapter)) {
+ netxen_schedule_work(adapter, netxen_attach_work, 0);
+ return;
+ }
+ break;
+
+ case NX_DEV_NEED_RESET:
+ case NX_DEV_INITALIZING:
+ netxen_schedule_work(adapter,
+ netxen_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+
+ case NX_DEV_FAILED:
+ default:
+ nx_incr_dev_ref_cnt(adapter);
+ break;
+ }
+
+ if (netxen_api_lock(adapter)) {
+ clear_bit(__NX_RESETTING, &adapter->state);
+ return;
+ }
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_FAILED);
+ netxen_api_unlock(adapter);
+ dev_err(&adapter->pdev->dev, "%s: Device initialization failed\n",
+ adapter->netdev->name);
+
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static void
+netxen_detach_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int ref_cnt = 0, delay;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ netxen_nic_down(adapter, netdev);
+
+ rtnl_lock();
+ netxen_nic_detach(adapter);
+ rtnl_unlock();
+
+ status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+
+ if (status & NX_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == NX_TEMP_PANIC)
+ goto err_ret;
+
+ if (!(adapter->flags & NETXEN_FW_RESET_OWNER))
+ ref_cnt = nx_decr_dev_ref_cnt(adapter);
+
+ if (ref_cnt == -EIO)
+ goto err_ret;
+
+ delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
+
+ adapter->fw_wait_cnt = 0;
+ netxen_schedule_work(adapter, netxen_fwinit_work, delay);
+
+ return;
+
+err_ret:
+ clear_bit(__NX_RESETTING, &adapter->state);
+}
+
+static int
+netxen_check_health(struct netxen_adapter *adapter)
+{
+ u32 state, heartbit;
+ u32 peg_status;
+ struct net_device *netdev = adapter->netdev;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
+ if (netxen_nic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ if (nx_dev_request_reset(adapter))
+ return 0;
+ goto detach;
+ }
+
+ /* NX_DEV_NEED_RESET can be marked in two cases:
+ * 1. Tx timeout 2. FW hang
+ * A request to destroy the context is sent only for a Tx timeout;
+ * it is not required for a FW hang.
+ */
+ if (state == NX_DEV_NEED_RESET || state == NX_DEV_FAILED) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
+
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ heartbit = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
+ clear_bit(__NX_FW_ATTACHED, &adapter->state);
+
+ dev_err(&netdev->dev, "firmware hang detected\n");
+ peg_status = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ peg_status,
+ NXRD32(adapter, NETXEN_PEG_HALT_STATUS2),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c),
+ NXRD32(adapter, NETXEN_CRB_PEG_NET_4 + 0x3c));
+ if (NX_FWERROR_PEGSTAT1(peg_status) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
+detach:
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__NX_RESETTING, &adapter->state))
+ netxen_schedule_work(adapter, netxen_detach_work, 0);
+ return 1;
+}
+
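+/* Firmware recovery flow: netxen_fw_poll_work() below runs
+ * netxen_check_health(); on failure it schedules netxen_detach_work(),
+ * which tears the interface down and hands off to netxen_fwinit_work();
+ * once the firmware restarts, netxen_attach_work() re-attaches the netdev
+ * and resumes the poll work.
+ */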
+static void
+netxen_fw_poll_work(struct work_struct *work)
+{
+ struct netxen_adapter *adapter = container_of(work,
+ struct netxen_adapter, fw_work.work);
+
+ if (test_bit(__NX_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (test_bit(__NX_DEV_UP, &adapter->state) &&
+ !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
+ if (!adapter->has_link_events) {
+
+ netxen_nic_handle_phy_intr(adapter);
+
+ if (adapter->link_changed)
+ netxen_nic_set_link_parameters(adapter);
+ }
+ }
+
+ if (netxen_check_health(adapter))
+ return;
+
+reschedule:
+ netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+netxen_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct net_device *net = to_net_dev(dev);
+ struct netxen_adapter *adapter = netdev_priv(net);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & NX_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ goto err_out;
+
+ if (kstrtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!netxen_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+netxen_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ struct netxen_adapter *adapter;
+ int bridged_mode = 0;
+
+ adapter = netdev_priv(net);
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & NETXEN_NIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static const struct device_attribute dev_attr_bridged_mode = {
+ .attr = { .name = "bridged_mode", .mode = 0644 },
+ .show = netxen_show_bridged_mode,
+ .store = netxen_store_bridged_mode,
+};
+
+static ssize_t
+netxen_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (kstrtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+netxen_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
+}
+
+static const struct device_attribute dev_attr_diag_mode = {
+ .attr = { .name = "diag_mode", .mode = 0644 },
+ .show = netxen_show_diag_mode,
+ .store = netxen_store_diag_mode,
+};
+
+static int
+netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
+ if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ return -EIO;
+
+ if (offset < NETXEN_PCI_CRBSPACE) {
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return -EINVAL;
+
+ if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
+
+ return 0;
+}
+
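+/* The "crb" attribute below only accepts naturally aligned accesses:
+ * 4-byte transfers for CRB registers, or 8-byte transfers for the CAMQM
+ * window on P3 parts. As a rough illustration (not part of the driver;
+ * <bdf> and reg_offset are placeholders), a user-space tool could read a
+ * register with:
+ *
+ * int fd = open("/sys/bus/pci/devices/<bdf>/crb", O_RDONLY);
+ * uint32_t val;
+ * pread(fd, &val, sizeof(val), reg_offset);
+ */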
+static ssize_t
+netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = netxen_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = NXRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ }
+
+ return size;
+}
+
+static ssize_t
+netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ u64 qmdata;
+ int ret;
+
+ ret = netxen_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
+ ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
+ NETXEN_PCI_CAMQM_2M_END)) {
+ memcpy(&qmdata, buf, size);
+ netxen_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ NXWR32(adapter, offset, data);
+ }
+
+ return size;
+}
+
+static int
+netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = netxen_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (adapter->pci_mem_read(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = netxen_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (adapter->pci_mem_write(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+
+static const struct bin_attribute bin_attr_crb = {
+ .attr = { .name = "crb", .mode = 0644 },
+ .size = 0,
+ .read = netxen_sysfs_read_crb,
+ .write = netxen_sysfs_write_crb,
+};
+
+static const struct bin_attribute bin_attr_mem = {
+ .attr = { .name = "mem", .mode = 0644 },
+ .size = 0,
+ .read = netxen_sysfs_read_mem,
+ .write = netxen_sysfs_write_mem,
+};
+
+static ssize_t
+netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct netxen_adapter *adapter = dev_get_drvdata(dev);
+ struct net_device *netdev = adapter->netdev;
+ struct netxen_dimm_cfg dimm;
+ u8 dw, rows, cols, banks, ranks;
+ u32 val;
+
+ if (size < attr->size) {
+ netdev_err(netdev, "Invalid size\n");
+ return -EINVAL;
+ }
+
+ memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
+ val = NXRD32(adapter, NETXEN_DIMM_CAPABILITY);
+
+ /* Checks if DIMM info is valid. */
+ if (val & NETXEN_DIMM_VALID_FLAG) {
+ netdev_err(netdev, "Invalid DIMM flag\n");
+ dimm.presence = 0xff;
+ goto out;
+ }
+
+ rows = NETXEN_DIMM_NUMROWS(val);
+ cols = NETXEN_DIMM_NUMCOLS(val);
+ ranks = NETXEN_DIMM_NUMRANKS(val);
+ banks = NETXEN_DIMM_NUMBANKS(val);
+ dw = NETXEN_DIMM_DATAWIDTH(val);
+
+ dimm.presence = (val & NETXEN_DIMM_PRESENT);
+
+ /* Checks if DIMM info is present. */
+ if (!dimm.presence) {
+ netdev_err(netdev, "DIMM not present\n");
+ goto out;
+ }
+
+ dimm.dimm_type = NETXEN_DIMM_TYPE(val);
+
+ switch (dimm.dimm_type) {
+ case NETXEN_DIMM_TYPE_RDIMM:
+ case NETXEN_DIMM_TYPE_UDIMM:
+ case NETXEN_DIMM_TYPE_SO_DIMM:
+ case NETXEN_DIMM_TYPE_Micro_DIMM:
+ case NETXEN_DIMM_TYPE_Mini_RDIMM:
+ case NETXEN_DIMM_TYPE_Mini_UDIMM:
+ break;
+ default:
+ netdev_err(netdev, "Invalid DIMM type %x\n", dimm.dimm_type);
+ goto out;
+ }
+
+ if (val & NETXEN_DIMM_MEMTYPE_DDR2_SDRAM)
+ dimm.mem_type = NETXEN_DIMM_MEM_DDR2_SDRAM;
+ else
+ dimm.mem_type = NETXEN_DIMM_MEMTYPE(val);
+
+ if (val & NETXEN_DIMM_SIZE) {
+ dimm.size = NETXEN_DIMM_STD_MEM_SIZE;
+ goto out;
+ }
+
+ if (!rows) {
+ netdev_err(netdev, "Invalid no of rows %x\n", rows);
+ goto out;
+ }
+
+ if (!cols) {
+ netdev_err(netdev, "Invalid no of columns %x\n", cols);
+ goto out;
+ }
+
+ if (!banks) {
+ netdev_err(netdev, "Invalid no of banks %x\n", banks);
+ goto out;
+ }
+
+ ranks += 1;
+
+ switch (dw) {
+ case 0x0:
+ dw = 32;
+ break;
+ case 0x1:
+ dw = 33;
+ break;
+ case 0x2:
+ dw = 36;
+ break;
+ case 0x3:
+ dw = 64;
+ break;
+ case 0x4:
+ dw = 72;
+ break;
+ case 0x5:
+ dw = 80;
+ break;
+ case 0x6:
+ dw = 128;
+ break;
+ case 0x7:
+ dw = 144;
+ break;
+ default:
+ netdev_err(netdev, "Invalid data-width %x\n", dw);
+ goto out;
+ }
+
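+ /* Capacity in bytes: (2^rows x 2^cols) addresses x data width in
+ * bits x banks x ranks, divided by 8 bits per byte.
+ */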
+ dimm.size = ((1 << rows) * (1 << cols) * dw * banks * ranks) / 8;
+ /* Size returned in MB. */
+ dimm.size = (dimm.size) / 0x100000;
+out:
+ memcpy(buf, &dimm, sizeof(struct netxen_dimm_cfg));
+ return sizeof(struct netxen_dimm_cfg);
+
+}
+
+static const struct bin_attribute bin_attr_dimm = {
+ .attr = { .name = "dimm", .mode = 0644 },
+ .size = sizeof(struct netxen_dimm_cfg),
+ .read = netxen_sysfs_read_dimm,
+};
+
+
+static void
+netxen_create_sysfs_entries(struct netxen_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG) {
+ /* bridged_mode control */
+ if (device_create_file(dev, &dev_attr_bridged_mode)) {
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+ }
+ }
+}
+
+static void
+netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & NX_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+netxen_create_diag_entries(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev;
+
+ dev = &pdev->dev;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_dimm))
+ dev_info(dev, "failed to create dimm sysfs entry\n");
+}
+
+
+static void
+netxen_remove_diag_entries(struct netxen_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct device *dev = &pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+ device_remove_bin_file(dev, &bin_attr_dimm);
+}
+
+#ifdef CONFIG_INET
+
+#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
+
+static int
+netxen_destip_supported(struct netxen_adapter *adapter)
+{
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ return 0;
+
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
+static void
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
+{
+ struct nx_ip_list *cur, *tmp_cur;
+
+ list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
+ if (master) {
+ if (cur->master) {
+ netxen_config_ipaddr(adapter, cur->ip_addr,
+ NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ } else {
+ netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+ }
+}
+
+static bool
+netxen_list_config_ip(struct netxen_adapter *adapter,
+ struct in_ifaddr *ifa, unsigned long event)
+{
+ struct net_device *dev;
+ struct nx_ip_list *cur, *tmp_cur;
+ struct list_head *head;
+ bool ret = false;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+ if (dev == NULL)
+ goto out;
+
+ switch (event) {
+ case NX_IP_UP:
+ list_for_each(head, &adapter->ip_list) {
+ cur = list_entry(head, struct nx_ip_list, list);
+
+ if (cur->ip_addr == ifa->ifa_address)
+ goto out;
+ }
+
+ cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
+ if (cur == NULL)
+ goto out;
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+ cur->master = !!netif_is_bond_master(dev);
+ cur->ip_addr = ifa->ifa_address;
+ list_add_tail(&cur->list, &adapter->ip_list);
+ netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
+ ret = true;
+ break;
+ case NX_IP_DOWN:
+ list_for_each_entry_safe(cur, tmp_cur,
+ &adapter->ip_list, list) {
+ if (cur->ip_addr == ifa->ifa_address) {
+ list_del(&cur->list);
+ kfree(cur);
+ netxen_config_ipaddr(adapter, ifa->ifa_address,
+ NX_IP_DOWN);
+ ret = true;
+ break;
+ }
+ }
+ }
+out:
+ return ret;
+}
+
+static void
+netxen_config_indev_addr(struct netxen_adapter *adapter,
+ struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+ struct in_ifaddr *ifa;
+
+ if (!netxen_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ rcu_read_lock();
+ in_dev_for_each_ifa_rcu(ifa, indev) {
+ switch (event) {
+ case NETDEV_UP:
+ netxen_list_config_ip(adapter, ifa, NX_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ }
+ rcu_read_unlock();
+ in_dev_put(indev);
+}
+
+static void
+netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct netxen_adapter *adapter = netdev_priv(netdev);
+ struct nx_ip_list *pos, *tmp_pos;
+ unsigned long ip_event;
+
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+ netxen_config_indev_addr(adapter, netdev, event);
+
+ list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
+ netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
+ }
+}
+
+static inline bool
+netxen_config_checkdev(struct net_device *dev)
+{
+ struct netxen_adapter *adapter;
+
+ if (!is_netxen_netdev(dev))
+ return false;
+ adapter = netdev_priv(dev);
+ if (!adapter)
+ return false;
+ if (!netxen_destip_supported(adapter))
+ return false;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return false;
+
+ return true;
+}
+
+/**
+ * netxen_config_master - configure addresses based on master
+ * @dev: netxen device
+ * @event: netdev event
+ */
+static void netxen_config_master(struct net_device *dev, unsigned long event)
+{
+ struct net_device *master, *slave;
+ struct netxen_adapter *adapter = netdev_priv(dev);
+
+ rcu_read_lock();
+ master = netdev_master_upper_dev_get_rcu(dev);
+ /*
+ * This is the case where the netxen nic is being
+ * enslaved and is dev_open()ed in bond_enslave()
+ * Now we should program the bond's (and its vlans')
+ * addresses in the netxen NIC.
+ */
+ if (master && netif_is_bond_master(master) &&
+ !netif_is_bond_slave(dev)) {
+ netxen_config_indev_addr(adapter, master, event);
+ for_each_netdev_rcu(&init_net, slave)
+ if (is_vlan_dev(slave) &&
+ vlan_dev_real_dev(slave) == master)
+ netxen_config_indev_addr(adapter, slave, event);
+ }
+ rcu_read_unlock();
+ /*
+ * This is the case where the netxen nic is being
+ * released and is dev_close()ed in bond_release()
+ * just before IFF_BONDING is stripped.
+ */
+ if (!master && dev->priv_flags & IFF_BONDING)
+ netxen_free_ip_list(adapter, true);
+}
+
+static int netxen_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *orig_dev = dev;
+ struct net_device *slave;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (is_vlan_dev(dev)) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_config_indev_addr(adapter,
+ orig_dev, event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ /* Act only if the actual netxen is the target */
+ if (orig_dev == dev)
+ netxen_config_master(dev, event);
+ netxen_config_indev_addr(adapter, orig_dev, event);
+ }
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+netxen_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netxen_adapter *adapter;
+ struct net_device *dev, *slave;
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+ unsigned long ip_event;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+ ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (is_vlan_dev(dev)) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+ if (event == NETDEV_UP || event == NETDEV_DOWN) {
+ /* If this is a bonding device, look for netxen-based slaves */
+ if (netif_is_bond_master(dev)) {
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(dev, slave) {
+ if (!netxen_config_checkdev(slave))
+ continue;
+ adapter = netdev_priv(slave);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ rcu_read_unlock();
+ } else {
+ if (!netxen_config_checkdev(dev))
+ goto done;
+ adapter = netdev_priv(dev);
+ netxen_list_config_ip(adapter, ifa, ip_event);
+ }
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block netxen_netdev_cb = {
+ .notifier_call = netxen_netdev_event,
+};
+
+static struct notifier_block netxen_inetaddr_cb = {
+ .notifier_call = netxen_inetaddr_event,
+};
+#else
+static void
+netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+static void
+netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
+{ }
+#endif
+
+static const struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+};
+
+static SIMPLE_DEV_PM_OPS(netxen_nic_pm_ops,
+ netxen_nic_suspend,
+ netxen_nic_resume);
+
+static struct pci_driver netxen_driver = {
+ .name = netxen_nic_driver_name,
+ .id_table = netxen_pci_tbl,
+ .probe = netxen_nic_probe,
+ .remove = netxen_nic_remove,
+ .driver.pm = &netxen_nic_pm_ops,
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
+};
+
+static int __init netxen_init_module(void)
+{
+ printk(KERN_INFO "%s\n", netxen_nic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&netxen_netdev_cb);
+ register_inetaddr_notifier(&netxen_inetaddr_cb);
+#endif
+ return pci_register_driver(&netxen_driver);
+}
+
+module_init(netxen_init_module);
+
+static void __exit netxen_exit_module(void)
+{
+ pci_unregister_driver(&netxen_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&netxen_inetaddr_cb);
+ unregister_netdevice_notifier(&netxen_netdev_cb);
+#endif
+}
+
+module_exit(netxen_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
new file mode 100644
index 000000000..3d2098f21
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# Copyright (c) 2019-2020 Marvell International Ltd.
+
+obj-$(CONFIG_QED) := qed.o
+
+qed-y := \
+ qed_chain.o \
+ qed_cxt.o \
+ qed_dcbx.o \
+ qed_debug.o \
+ qed_dev.o \
+ qed_devlink.o \
+ qed_hw.o \
+ qed_init_fw_funcs.o \
+ qed_init_ops.o \
+ qed_int.o \
+ qed_l2.o \
+ qed_main.o \
+ qed_mcp.o \
+ qed_mng_tlv.o \
+ qed_ptp.o \
+ qed_selftest.o \
+ qed_sp_commands.o \
+ qed_spq.o
+
+qed-$(CONFIG_QED_FCOE) += qed_fcoe.o
+qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o
+qed-$(CONFIG_QED_LL2) += qed_ll2.o
+qed-$(CONFIG_QED_OOO) += qed_ooo.o
+
+qed-$(CONFIG_QED_NVMETCP) += \
+ qed_nvmetcp.o \
+ qed_nvmetcp_fw_funcs.o
+
+qed-$(CONFIG_QED_RDMA) += \
+ qed_iwarp.o \
+ qed_rdma.o \
+ qed_roce.o
+
+qed-$(CONFIG_QED_SRIOV) += \
+ qed_sriov.o \
+ qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
new file mode 100644
index 000000000..d613095b7
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -0,0 +1,1007 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_H
+#define _QED_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/zlib.h>
+#include <linux/hashtable.h>
+#include <linux/qed/qed_if.h>
+#include "qed_debug.h"
+#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
+#include "qed_mfw_hsi.h"
+
+extern const struct qed_common_ops qed_common_ops_pass;
+
+#define STORM_FW_VERSION \
+ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
+#define MAX_HWFNS_PER_DEVICE (4)
+#define NAME_SIZE 16
+#define VER_SIZE 16
+
+#define QED_WFQ_UNIT 100
+
+#define QED_WID_SIZE (1024)
+#define QED_MIN_WIDS (4)
+#define QED_PF_DEMS_SIZE (4)
+
+#define QED_LLH_DONT_CARE 0
+
+/* cau states */
+enum qed_coalescing_mode {
+ QED_COAL_MODE_DISABLE,
+ QED_COAL_MODE_ENABLE
+};
+
+enum qed_nvm_cmd {
+ QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
+ QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
+ QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
+ QED_GET_MCP_NVM_RESP = 0xFFFFFF00
+};
+
+struct qed_eth_cb_ops;
+struct qed_dev_info;
+union qed_mcp_protocol_stats;
+enum qed_mcp_protocol_type;
+enum qed_mfw_tlv_type;
+union qed_mfw_tlv_data;
+
+/* helpers */
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+#define QED_MFW_SET_FIELD(name, field, value) \
+ do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
+ } while (0)
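+/* Example usage (illustrative only; FOO_MASK/FOO_SHIFT stand for any field
+ * defined with the usual <field>_MASK/<field>_SHIFT pair):
+ *
+ * u32 tmp = QED_MFW_GET_FIELD(reg, FOO);
+ * QED_MFW_SET_FIELD(reg, FOO, tmp + 1);
+ */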
+
+static inline u32 qed_db_addr(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ (cid * QED_PF_DEMS_SIZE);
+
+ return db_addr;
+}
+
+static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
+{
+ u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
+ FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
+
+ return db_addr;
+}
+
+#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
+ ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
+ ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
+
+#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++)
+
+#define D_TRINE(val, cond1, cond2, true1, true2, def) \
+ ((val) == (cond1) ? true1 : \
+ ((val) == (cond2) ? true2 : def))
+
+/* forward */
+struct qed_ptt_pool;
+struct qed_spq;
+struct qed_sb_info;
+struct qed_sb_attn_info;
+struct qed_cxt_mngr;
+struct qed_sb_sp_info;
+struct qed_ll2_info;
+struct qed_mcp_info;
+struct qed_llh_info;
+
+struct qed_rt_data {
+ u32 *init_val;
+ bool *b_valid;
+};
+
+enum qed_tunn_mode {
+ QED_MODE_L2GENEVE_TUNN,
+ QED_MODE_IPGENEVE_TUNN,
+ QED_MODE_L2GRE_TUNN,
+ QED_MODE_IPGRE_TUNN,
+ QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+ QED_TUNN_CLSS_MAC_VLAN,
+ QED_TUNN_CLSS_MAC_VNI,
+ QED_TUNN_CLSS_INNER_MAC_VLAN,
+ QED_TUNN_CLSS_INNER_MAC_VNI,
+ QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_update_type {
+ bool b_update_mode;
+ bool b_mode_enabled;
+ enum qed_tunn_clss tun_cls;
+};
+
+struct qed_tunn_update_udp_port {
+ bool b_update_port;
+ u16 port;
+};
+
+struct qed_tunnel_info {
+ struct qed_tunn_update_type vxlan;
+ struct qed_tunn_update_type l2_geneve;
+ struct qed_tunn_update_type ip_geneve;
+ struct qed_tunn_update_type l2_gre;
+ struct qed_tunn_update_type ip_gre;
+
+ struct qed_tunn_update_udp_port vxlan_port;
+ struct qed_tunn_update_udp_port geneve_port;
+
+ bool b_update_rx_cls;
+ bool b_update_tx_cls;
+};
+
+struct qed_tunn_start_params {
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+/* The PCI personality is not quite synonymous with the protocol ID:
+ * 1. All personalities need CORE connections
+ * 2. The Ethernet personality may support also the RoCE/iWARP protocol
+ */
+enum qed_pci_personality {
+ QED_PCI_ETH,
+ QED_PCI_FCOE,
+ QED_PCI_ISCSI,
+ QED_PCI_NVMETCP,
+ QED_PCI_ETH_ROCE,
+ QED_PCI_ETH_IWARP,
+ QED_PCI_ETH_RDMA,
+ QED_PCI_DEFAULT, /* default in shmem */
+};
+
+/* All VFs are symmetric, all counters are PF + all VFs */
+struct qed_qm_iids {
+ u32 cids;
+ u32 vf_cids;
+ u32 tids;
+};
+
+/* HW / FW resources, from which the supported features below are derived;
+ * most of this information is received from the MFW.
+ */
+enum qed_resources {
+ QED_SB,
+ QED_L2_QUEUE,
+ QED_VPORT,
+ QED_RSS_ENG,
+ QED_PQ,
+ QED_RL,
+ QED_MAC,
+ QED_VLAN,
+ QED_RDMA_CNQ_RAM,
+ QED_ILT,
+ QED_LL2_RAM_QUEUE,
+ QED_LL2_CTX_QUEUE,
+ QED_CMDQS_CQS,
+ QED_RDMA_STATS_QUEUE,
+ QED_BDQ,
+ QED_MAX_RESC,
+};
+
+enum QED_FEATURE {
+ QED_PF_L2_QUE,
+ QED_VF,
+ QED_RDMA_CNQ,
+ QED_NVMETCP_CQ,
+ QED_ISCSI_CQ,
+ QED_FCOE_CQ,
+ QED_VF_L2_QUE,
+ QED_MAX_FEATURES,
+};
+
+enum qed_dev_cap {
+ QED_DEV_CAP_ETH,
+ QED_DEV_CAP_FCOE,
+ QED_DEV_CAP_ISCSI,
+ QED_DEV_CAP_ROCE,
+ QED_DEV_CAP_IWARP,
+};
+
+enum qed_wol_support {
+ QED_WOL_SUPPORT_NONE,
+ QED_WOL_SUPPORT_PME,
+};
+
+enum qed_db_rec_exec {
+ DB_REC_DRY_RUN,
+ DB_REC_REAL_DEAL,
+ DB_REC_ONCE,
+};
+
+struct qed_hw_info {
+ /* PCI personality */
+ enum qed_pci_personality personality;
+#define QED_IS_RDMA_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_ROCE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
+ (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_IWARP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
+ (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
+#define QED_IS_L2_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ETH || \
+ QED_IS_RDMA_PERSONALITY(dev))
+#define QED_IS_FCOE_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_FCOE)
+#define QED_IS_ISCSI_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_ISCSI)
+#define QED_IS_NVMETCP_PERSONALITY(dev) \
+ ((dev)->hw_info.personality == QED_PCI_NVMETCP)
+
+ /* Resource Allocation scheme results */
+ u32 resc_start[QED_MAX_RESC];
+ u32 resc_num[QED_MAX_RESC];
+#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
+#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
+
+ u32 feat_num[QED_MAX_FEATURES];
+#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
+
+ /* Number of traffic classes the HW supports */
+ u8 num_hw_tc;
+
+ /* Number of TCs which should be active according to DCBx or upper
+ * layer driver configuration.
+ */
+ u8 num_active_tc;
+
+ u8 offload_tc;
+ bool offload_tc_set;
+
+ bool multi_tc_roce_en;
+#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en)
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 ovlan;
+ u32 part_num[4];
+
+ unsigned char hw_mac_addr[ETH_ALEN];
+ u64 node_wwn;
+ u64 port_wwn;
+
+ u16 num_fcoe_conns;
+
+ struct qed_igu_info *p_igu_info;
+
+ u32 hw_mode;
+ unsigned long device_capabilities;
+ u16 mtu;
+
+ enum qed_wol_support b_wol_support;
+};
+
+/* maximum size of read/write commands (HW limit) */
+#define DMAE_MAX_RW_SIZE 0x2000
+
+struct qed_dmae_info {
+ /* Mutex for synchronizing access to functions */
+ struct mutex mutex;
+
+ u8 channel;
+
+ dma_addr_t completion_word_phys_addr;
+
+ /* The memory location where the DMAE writes the completion
+ * value when an operation is finished on this context.
+ */
+ u32 *p_completion_word;
+
+ dma_addr_t intermediate_buffer_phys_addr;
+
+ /* An intermediate buffer for DMAE operations that use virtual
+ * addresses - data is DMA'd to/from this buffer and then
+ * memcpy'd to/from the virtual address
+ */
+ u32 *p_intermediate_buffer;
+
+ dma_addr_t dmae_cmd_phys_addr;
+ struct dmae_cmd *p_dmae_cmd;
+};
+
+struct qed_wfq_data {
+ /* when feature is configured for at least 1 vport */
+ u32 min_speed;
+ bool configured;
+};
+
+struct qed_qm_info {
+ struct init_qm_pq_params *qm_pq_params;
+ struct init_qm_vport_params *qm_vport_params;
+ struct init_qm_port_params *qm_port_params;
+ u16 start_pq;
+ u8 start_vport;
+ u16 pure_lb_pq;
+ u16 first_ofld_pq;
+ u16 first_llt_pq;
+ u16 pure_ack_pq;
+ u16 ooo_pq;
+ u16 first_vf_pq;
+ u16 first_mcos_pq;
+ u16 first_rl_pq;
+ u16 num_pqs;
+ u16 num_vf_pqs;
+ u8 num_vports;
+ u8 max_phys_tcs_per_port;
+ u8 ooo_tc;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct qed_wfq_data *wfq_data;
+ u8 num_pf_rls;
+};
+
+#define QED_OVERFLOW_BIT 1
+
+struct qed_db_recovery_info {
+ struct list_head list;
+
+ /* Lock to protect the doorbell recovery mechanism list */
+ spinlock_t lock;
+ bool dorq_attn;
+ u32 db_recovery_counter;
+ unsigned long overflow;
+};
+
+struct storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct qed_storm_stats {
+ struct storm_stats mstats;
+ struct storm_stats pstats;
+ struct storm_stats tstats;
+ struct storm_stats ustats;
+};
+
+struct qed_fw_data {
+ struct fw_ver_info *fw_ver_info;
+ const u8 *modes_tree_buf;
+ union init_op *init_ops;
+ const u32 *arr_data;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
+ u32 init_ops_size;
+};
+
+enum qed_mf_mode_bit {
+ /* Supports PF-classification based on tag */
+ QED_MF_OVLAN_CLSS,
+
+ /* Supports PF-classification based on MAC */
+ QED_MF_LLH_MAC_CLSS,
+
+ /* Supports PF-classification based on protocol type */
+ QED_MF_LLH_PROTO_CLSS,
+
+ /* Requires a default PF to be set */
+ QED_MF_NEED_DEF_PF,
+
+ /* Allow LL2 to multicast/broadcast */
+ QED_MF_LL2_NON_UNICAST,
+
+ /* Allow Cross-PF [& child VFs] Tx-switching */
+ QED_MF_INTER_PF_SWITCH,
+
+ /* Unified Fabric Port support enabled */
+ QED_MF_UFP_SPECIFIC,
+
+ /* Disable Accelerated Receive Flow Steering (aRFS) */
+ QED_MF_DISABLE_ARFS,
+
+ /* Use vlan for steering */
+ QED_MF_8021Q_TAGGING,
+
+ /* Use stag for steering */
+ QED_MF_8021AD_TAGGING,
+
+ /* Allow DSCP to TC mapping */
+ QED_MF_DSCP_TO_TC_MAP,
+
+ /* Do not insert a vlan tag with id 0 */
+ QED_MF_DONT_ADD_VLAN0_TAG,
+};
+
+enum qed_ufp_mode {
+ QED_UFP_MODE_ETS,
+ QED_UFP_MODE_VNIC_BW,
+ QED_UFP_MODE_UNKNOWN
+};
+
+enum qed_ufp_pri_type {
+ QED_UFP_PRI_OS,
+ QED_UFP_PRI_VNIC,
+ QED_UFP_PRI_UNKNOWN
+};
+
+struct qed_ufp_info {
+ enum qed_ufp_pri_type pri_type;
+ enum qed_ufp_mode mode;
+ u8 tc;
+};
+
+enum BAR_ID {
+ BAR_ID_0, /* used for GRC */
+ BAR_ID_1 /* Used for doorbells */
+};
+
+struct qed_nvm_image_info {
+ u32 num_images;
+ struct bist_nvm_image_att *image_att;
+ bool valid;
+};
+
+enum qed_hsi_def_type {
+ QED_HSI_DEF_MAX_NUM_VFS,
+ QED_HSI_DEF_MAX_NUM_L2_QUEUES,
+ QED_HSI_DEF_MAX_NUM_PORTS,
+ QED_HSI_DEF_MAX_SB_PER_PATH,
+ QED_HSI_DEF_MAX_NUM_PFS,
+ QED_HSI_DEF_MAX_NUM_VPORTS,
+ QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
+ QED_HSI_DEF_MAX_QM_TX_QUEUES,
+ QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
+ QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
+ QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
+ QED_HSI_DEF_MAX_PBF_CMD_LINES,
+ QED_HSI_DEF_MAX_BTB_BLOCKS,
+ QED_NUM_HSI_DEFS
+};
+
+struct qed_simd_fp_handler {
+ void *token;
+ void (*func)(void *cookie);
+};
+
+enum qed_slowpath_wq_flag {
+ QED_SLOWPATH_MFW_TLV_REQ,
+ QED_SLOWPATH_PERIODIC_DB_REC,
+};
+
+struct qed_hwfn {
+ struct qed_dev *cdev;
+ u8 my_id; /* ID inside the PF */
+#define IS_LEAD_HWFN(edev) (!((edev)->my_id))
+ u8 rel_pf_id; /* Relative to engine */
+ u8 abs_pf_id;
+#define QED_PATH_ID(_p_hwfn) \
+ (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
+ u8 port_id;
+ bool b_active;
+
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ bool hw_init_done;
+
+ u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
+
+ /* BAR access */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PTT pool */
+ struct qed_ptt_pool *p_ptt_pool;
+
+ /* HW info */
+ struct qed_hw_info hw_info;
+
+ /* rt_array (for init-tool) */
+ struct qed_rt_data rt_data;
+
+ /* SPQ */
+ struct qed_spq *p_spq;
+
+ /* EQ */
+ struct qed_eq *p_eq;
+
+ /* Consolidate Q */
+ struct qed_consq *p_consq;
+
+ /* Slow-Path definitions */
+ struct tasklet_struct sp_dpc;
+ bool b_sp_dpc_enabled;
+
+ struct qed_ptt *p_main_ptt;
+ struct qed_ptt *p_dpc_ptt;
+
+ /* PTP will be used only by the leading function.
+ * Usage of all PTP APIs should be synchronized as a result.
+ */
+ struct qed_ptt *p_ptp_ptt;
+
+ struct qed_sb_sp_info *p_sp_sb;
+ struct qed_sb_attn_info *p_sb_attn;
+
+ /* Protocol related */
+ bool using_ll2;
+ struct qed_ll2_info *p_ll2_info;
+ struct qed_ooo_info *p_ooo_info;
+ struct qed_rdma_info *p_rdma_info;
+ struct qed_iscsi_info *p_iscsi_info;
+ struct qed_nvmetcp_info *p_nvmetcp_info;
+ struct qed_fcoe_info *p_fcoe_info;
+ struct qed_pf_params pf_params;
+
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
+
+ struct qed_cxt_mngr *p_cxt_mngr;
+
+ /* Flag indicating whether interrupts are enabled or not */
+ bool b_int_enabled;
+ bool b_int_requested;
+
+ /* True if the driver requests for the link */
+ bool b_drv_link_init;
+
+ struct qed_vf_iov *vf_iov_info;
+ struct qed_pf_iov *pf_iov_info;
+ struct qed_mcp_info *mcp_info;
+
+ struct qed_dcbx_info *p_dcbx_info;
+
+ struct qed_ufp_info ufp_info;
+
+ struct qed_dmae_info dmae_info;
+
+ /* QM init */
+ struct qed_qm_info qm_info;
+ struct qed_storm_stats storm_stats;
+
+ /* Buffer for unzipping firmware data */
+ void *unzip_buf;
+
+ struct dbg_tools_data dbg_info;
+ void *dbg_user_info;
+ struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
+
+ /* PWM region specific data */
+ u16 wid_count;
+ u32 dpi_size;
+ u32 dpi_count;
+
+ /* This is used to calculate the doorbell address */
+ u32 dpi_start_offset;
+
+ /* If one of the following is set then EDPM shouldn't be used */
+ u8 dcbx_no_edpm;
+ u8 db_bar_no_edpm;
+
+ /* L2-related */
+ struct qed_l2_info *p_l2_info;
+
+ /* Mechanism for recovering from doorbell drop */
+ struct qed_db_recovery_info db_recovery_info;
+
+ /* Nvm images number and attributes */
+ struct qed_nvm_image_info nvm_info;
+
+ struct phys_mem_desc *fw_overlay_mem;
+ struct qed_ptt *p_arfs_ptt;
+
+ struct qed_simd_fp_handler simd_proto_handler[64];
+
+#ifdef CONFIG_QED_SRIOV
+ struct workqueue_struct *iov_wq;
+ struct delayed_work iov_task;
+ unsigned long iov_task_flags;
+#endif
+ struct z_stream_s *stream;
+ bool slowpath_wq_active;
+ struct workqueue_struct *slowpath_wq;
+ struct delayed_work slowpath_task;
+ unsigned long slowpath_task_flags;
+ u32 periodic_db_rec_count;
+};
+
+struct pci_params {
+ int pm_cap;
+
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned int irq;
+ u8 pf_num;
+};
+
+struct qed_int_param {
+ u32 int_mode;
+ u8 num_vectors;
+ u8 min_msix_cnt; /* for minimal functionality */
+};
+
+struct qed_int_params {
+ struct qed_int_param in;
+ struct qed_int_param out;
+ struct msix_entry *msix_table;
+ bool fp_initialized;
+ u8 fp_msix_base;
+ u8 fp_msix_cnt;
+ u8 rdma_msix_base;
+ u8 rdma_msix_cnt;
+};
+
+struct qed_dbg_feature {
+ struct dentry *dentry;
+ u8 *dump_buf;
+ u32 buf_size;
+ u32 dumped_dwords;
+};
+
+struct qed_dev {
+ u32 dp_module;
+ u8 dp_level;
+ char name[NAME_SIZE];
+
+ enum qed_dev_type type;
+ /* Translate type/revision combo into the proper conditions */
+#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB)
+#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
+#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
+#define QED_IS_K2(dev) QED_IS_AH(dev)
+
+ u16 vendor_id;
+
+ u16 device_id;
+#define QED_DEV_ID_MASK 0xff00
+#define QED_DEV_ID_MASK_BB 0x1600
+#define QED_DEV_ID_MASK_AH 0x8000
+
+ u16 chip_num;
+#define CHIP_NUM_MASK 0xffff
+#define CHIP_NUM_SHIFT 16
+
+ u16 chip_rev;
+#define CHIP_REV_MASK 0xf
+#define CHIP_REV_SHIFT 12
+#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1)
+
+ u16 chip_metal;
+#define CHIP_METAL_MASK 0xff
+#define CHIP_METAL_SHIFT 4
+
+ u16 chip_bond_id;
+#define CHIP_BOND_ID_MASK 0xf
+#define CHIP_BOND_ID_SHIFT 0
+
+ u8 num_engines;
+ u8 num_ports;
+ u8 num_ports_in_engine;
+ u8 num_funcs_in_port;
+
+ u8 path_id;
+
+ unsigned long mf_bits;
+
+ int pcie_width;
+ int pcie_speed;
+
+ /* Add MF related configuration */
+ u8 mcp_rev;
+ u8 boot_mode;
+
+ /* WoL related configurations */
+ u8 wol_config;
+ u8 wol_mac[ETH_ALEN];
+
+ u32 int_mode;
+ enum qed_coalescing_mode int_coalescing_mode;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
+
+ /* Start Bar offset of first hwfn */
+ void __iomem *regview;
+ void __iomem *doorbells;
+ u64 db_phys_addr;
+ unsigned long db_size;
+
+ /* PCI */
+ u8 cache_shift;
+
+ /* Init */
+ const u32 *iro_arr;
+#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
+
+ /* HW functions */
+ u8 num_hwfns;
+ struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+
+ /* Engine affinity */
+ u8 l2_affin_hint;
+ u8 fir_affin;
+ u8 iwarp_affin;
+
+ /* SRIOV */
+ struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
+ struct qed_tunnel_info tunnel;
+ bool b_is_vf;
+ u32 drv_type;
+ struct qed_eth_stats *reset_stats;
+ struct qed_fw_data *fw_data;
+
+ u32 mcp_nvm_resp;
+
+ /* Recovery */
+ bool recov_in_prog;
+
+ /* Indicates whether attentions should be prevented from being reasserted */
+ bool attn_clr_en;
+
+ /* LLH info */
+ u8 ppfid_bitmap;
+ struct qed_llh_info *p_llh_info;
+
+ /* Linux specific here */
+ struct qed_dev_info common_dev_info;
+ struct qede_dev *edev;
+ struct pci_dev *pdev;
+ u32 flags;
+#define QED_FLAG_STORAGE_STARTED (BIT(0))
+ int msg_enable;
+
+ struct pci_params pci_params;
+
+ struct qed_int_params int_params;
+
+ u8 protocol;
+#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH)
+#define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE)
+
+ /* Callbacks to protocol driver */
+ union {
+ struct qed_common_cb_ops *common;
+ struct qed_eth_cb_ops *eth;
+ struct qed_fcoe_cb_ops *fcoe;
+ struct qed_iscsi_cb_ops *iscsi;
+ struct qed_nvmetcp_cb_ops *nvmetcp;
+ } protocol_ops;
+ void *ops_cookie;
+
+#ifdef CONFIG_QED_LL2
+ struct qed_cb_ll2_info *ll2;
+ u8 ll2_mac_address[ETH_ALEN];
+#endif
+ struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
+ u8 engine_for_debug;
+ bool disable_ilt_dump;
+ bool dbg_bin_dump;
+
+ DECLARE_HASHTABLE(connections, 10);
+ const struct firmware *firmware;
+
+ bool print_dbg_data;
+
+ u32 rdma_max_sge;
+ u32 rdma_max_inline;
+ u32 rdma_max_srq_sge;
+ u16 tunn_feature_mask;
+
+ bool iwarp_cmt;
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
+
+#define NUM_OF_VFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
+#define NUM_OF_L2_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
+#define NUM_OF_PORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
+#define NUM_OF_SBS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
+#define NUM_OF_ENG_PFS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
+#define NUM_OF_VPORTS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
+#define NUM_OF_RSS_ENGINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
+#define NUM_OF_QM_TX_QUEUES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
+#define NUM_OF_PXP_ILT_RECORDS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
+#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
+#define NUM_OF_QM_GLOBAL_RLS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
+#define NUM_OF_PBF_CMD_LINES(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
+#define NUM_OF_BTB_BLOCKS(dev) \
+ qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
+
+/**
+ * qed_concrete_to_sw_fid(): Get the sw function id from
+ * the concrete value.
+ *
+ * @cdev: Qed dev pointer.
+ * @concrete_fid: Concrete fid.
+ *
+ * Return: The SW function id (u8).
+ */
+static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
+ u32 concrete_fid)
+{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
+ u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid,
+ PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
+
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
+
+ return sw_fid;
+}
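+/* With the mapping above, PF concrete fids resolve to sw_fid == pfid
+ * (0 .. MAX_NUM_PFS - 1), while VF fids land in the range starting at
+ * MAX_NUM_PFS (sw_fid == MAX_NUM_PFS + vfid).
+ */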
+
+#define PKT_LB_TC 9
+
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_device_num_engines(struct qed_dev *cdev);
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
+
+#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0])
+#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
+/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
+#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
+#define QED_IWARP_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->iwarp_affin])
+#define QED_AFFIN_HWFN(dev) \
+ (QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
+ QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
+#define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
+
+/* Flags for indication of required queues */
+#define PQ_FLAGS_RLS (BIT(0))
+#define PQ_FLAGS_MCOS (BIT(1))
+#define PQ_FLAGS_LB (BIT(2))
+#define PQ_FLAGS_OOO (BIT(3))
+#define PQ_FLAGS_ACK (BIT(4))
+#define PQ_FLAGS_OFLD (BIT(5))
+#define PQ_FLAGS_VFS (BIT(6))
+#define PQ_FLAGS_LLT (BIT(7))
+#define PQ_FLAGS_MTC (BIT(8))
+
+/* physical queue index for cm context initialization */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
+
+/* doorbell recovery mechanism */
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+
+#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx)))
+
+#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
+
+/* Other Linux specific common definitions */
+#define DP_NAME(cdev) ((cdev)->name)
+
+#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\
+ ((cdev)->regview) + \
+ (offset)))
+
+#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
+#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
+#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset))
+
+#define DOORBELL(cdev, db_addr, val) \
+ writel((u32)val, (void __iomem *)((u8 __iomem *)\
+ ((cdev)->doorbells) + (db_addr)))
+
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ qed_device_num_ports((_p_hwfn)->cdev))
+int qed_device_num_ports(struct qed_dev *cdev);
+
+/* Prototypes */
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info);
+void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
+ u32 input_len, u8 *input_buf,
+ u32 max_size, u8 *unzip_buf);
+int qed_recovery_process(struct qed_dev *cdev);
+void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type);
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats);
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
+
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
+ enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_data);
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
+
+void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
+
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
+void qed_llh_clear_all_filters(struct qed_dev *cdev);
+unsigned long qed_get_epoch_time(void);
+#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c
new file mode 100644
index 000000000..b83d17b14
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (c) 2020 Marvell International Ltd. */
+
+#include <linux/dma-mapping.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/vmalloc.h>
+
+#include "qed_dev_api.h"
+
+static void qed_chain_init(struct qed_chain *chain,
+ const struct qed_chain_init_params *params,
+ u32 page_cnt)
+{
+ memset(chain, 0, sizeof(*chain));
+
+ chain->elem_size = params->elem_size;
+ chain->intended_use = params->intended_use;
+ chain->mode = params->mode;
+ chain->cnt_type = params->cnt_type;
+
+ chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size,
+ params->page_size);
+ chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size,
+ params->page_size,
+ params->mode);
+ chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size,
+ params->mode);
+
+ chain->elem_per_page_mask = chain->elem_per_page - 1;
+ chain->next_page_mask = chain->usable_per_page &
+ chain->elem_per_page_mask;
+
+ chain->page_size = params->page_size;
+ chain->page_cnt = page_cnt;
+ chain->capacity = chain->usable_per_page * page_cnt;
+ chain->size = chain->elem_per_page * page_cnt;
+
+ if (params->ext_pbl_virt) {
+ chain->pbl_sp.table_virt = params->ext_pbl_virt;
+ chain->pbl_sp.table_phys = params->ext_pbl_phys;
+
+ chain->b_external_pbl = true;
+ }
+}
+
+static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain,
+ void *virt_curr, void *virt_next,
+ dma_addr_t phys_next)
+{
+ struct qed_chain_next *next;
+ u32 size;
+
+ size = chain->elem_size * chain->usable_per_page;
+ next = virt_curr + size;
+
+ DMA_REGPAIR_LE(next->next_phys, phys_next);
+ next->next_virt = virt_next;
+}
+
+static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr,
+ dma_addr_t phys_addr)
+{
+ chain->p_virt_addr = virt_addr;
+ chain->p_phys_addr = phys_addr;
+}
+
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ struct qed_chain_next *next;
+ dma_addr_t phys, phys_next;
+ void *virt, *virt_next;
+ u32 size, i;
+
+ size = chain->elem_size * chain->usable_per_page;
+ virt = chain->p_virt_addr;
+ phys = chain->p_phys_addr;
+
+ for (i = 0; i < chain->page_cnt; i++) {
+ if (!virt)
+ break;
+
+ next = virt + size;
+ virt_next = next->next_virt;
+ phys_next = HILO_DMA_REGPAIR(next->next_phys);
+
+ dma_free_coherent(dev, chain->page_size, virt, phys);
+
+ virt = virt_next;
+ phys = phys_next;
+ }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ if (!chain->p_virt_addr)
+ return;
+
+ dma_free_coherent(&cdev->pdev->dev, chain->page_size,
+ chain->p_virt_addr, chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ struct addr_tbl_entry *entry;
+ u32 i;
+
+ if (!chain->pbl.pp_addr_tbl)
+ return;
+
+ for (i = 0; i < chain->page_cnt; i++) {
+ entry = chain->pbl.pp_addr_tbl + i;
+ if (!entry->virt_addr)
+ break;
+
+ dma_free_coherent(dev, chain->page_size, entry->virt_addr,
+ entry->dma_map);
+ }
+
+ if (!chain->b_external_pbl)
+ dma_free_coherent(dev, chain->pbl_sp.table_size,
+ chain->pbl_sp.table_virt,
+ chain->pbl_sp.table_phys);
+
+ vfree(chain->pbl.pp_addr_tbl);
+ chain->pbl.pp_addr_tbl = NULL;
+}
+
+/**
+ * qed_chain_free() - Free chain DMA memory.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to free.
+ */
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain)
+{
+ switch (chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ qed_chain_free_next_ptr(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ qed_chain_free_single(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ qed_chain_free_pbl(cdev, chain);
+ break;
+ default:
+ return;
+ }
+
+ qed_chain_init_mem(chain, NULL, 0);
+}
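+/* Typical allocation/free pairing (a sketch only, not driver code; the
+ * element union and counts below are made up for illustration):
+ *
+ * struct qed_chain_init_params params = {
+ * .mode = QED_CHAIN_MODE_PBL,
+ * .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ * .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ * .num_elems = 256,
+ * .elem_size = sizeof(union example_elem),
+ * };
+ *
+ * if (qed_chain_alloc(cdev, &chain, &params))
+ * return -ENOMEM;
+ * ... use the chain ...
+ * qed_chain_free(cdev, &chain);
+ */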
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+ const struct qed_chain_init_params *params,
+ u32 page_cnt)
+{
+ u64 chain_size;
+
+ chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size);
+ chain_size *= page_cnt;
+
+ if (!chain_size)
+ return -EINVAL;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested number of elements to pages, and after
+ * taking into account the unusable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of u32 type.
+ */
+ switch (params->cnt_type) {
+ case QED_CHAIN_CNT_TYPE_U16:
+ if (chain_size > U16_MAX + 1)
+ break;
+
+ return 0;
+ case QED_CHAIN_CNT_TYPE_U32:
+ if (chain_size > U32_MAX)
+ break;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ DP_NOTICE(cdev,
+ "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+ chain_size);
+
+ return -EINVAL;
+}
+
+static int qed_chain_alloc_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ void *virt, *virt_prev = NULL;
+ dma_addr_t phys;
+ u32 i;
+
+ for (i = 0; i < chain->page_cnt; i++) {
+ virt = dma_alloc_coherent(dev, chain->page_size, &phys,
+ GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ if (i == 0) {
+ qed_chain_init_mem(chain, virt, phys);
+ qed_chain_reset(chain);
+ } else {
+ qed_chain_init_next_ptr_elem(chain, virt_prev, virt,
+ phys);
+ }
+
+ virt_prev = virt;
+ }
+
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr,
+ chain->p_phys_addr);
+
+ return 0;
+}
+
+static int qed_chain_alloc_single(struct qed_dev *cdev,
+ struct qed_chain *chain)
+{
+ dma_addr_t phys;
+ void *virt;
+
+ virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size,
+ &phys, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ qed_chain_init_mem(chain, virt, phys);
+ qed_chain_reset(chain);
+
+ return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain)
+{
+ struct device *dev = &cdev->pdev->dev;
+ struct addr_tbl_entry *addr_tbl;
+ dma_addr_t phys, pbl_phys;
+ __le64 *pbl_virt;
+ u32 page_cnt, i;
+ size_t size;
+ void *virt;
+
+ page_cnt = chain->page_cnt;
+
+ size = array_size(page_cnt, sizeof(*addr_tbl));
+ if (unlikely(size == SIZE_MAX))
+ return -EOVERFLOW;
+
+ addr_tbl = vzalloc(size);
+ if (!addr_tbl)
+ return -ENOMEM;
+
+ chain->pbl.pp_addr_tbl = addr_tbl;
+
+ if (chain->b_external_pbl) {
+ pbl_virt = chain->pbl_sp.table_virt;
+ goto alloc_pages;
+ }
+
+ size = array_size(page_cnt, sizeof(*pbl_virt));
+ if (unlikely(size == SIZE_MAX))
+ return -EOVERFLOW;
+
+ pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL);
+ if (!pbl_virt)
+ return -ENOMEM;
+
+ chain->pbl_sp.table_virt = pbl_virt;
+ chain->pbl_sp.table_phys = pbl_phys;
+ chain->pbl_sp.table_size = size;
+
+alloc_pages:
+ for (i = 0; i < page_cnt; i++) {
+ virt = dma_alloc_coherent(dev, chain->page_size, &phys,
+ GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ if (i == 0) {
+ qed_chain_init_mem(chain, virt, phys);
+ qed_chain_reset(chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ pbl_virt[i] = cpu_to_le64(phys);
+
+ /* Keep the virtual address of the page */
+ addr_tbl[i].virt_addr = virt;
+ addr_tbl[i].dma_map = phys;
+ }
+
+ return 0;
+}
+
+/**
+ * qed_chain_alloc() - Allocate and initialize a chain.
+ *
+ * @cdev: Main device structure.
+ * @chain: Chain to be processed.
+ * @params: Chain initialization parameters.
+ *
+ * Return: 0 on success, negative errno otherwise.
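+ *
+ * A zero @params->page_size defaults to QED_CHAIN_PAGE_SIZE. On failure the
+ * partially allocated chain is released via qed_chain_free().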
+ */
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+ struct qed_chain_init_params *params)
+{
+ u32 page_cnt;
+ int rc;
+
+ if (!params->page_size)
+ params->page_size = QED_CHAIN_PAGE_SIZE;
+
+ if (params->mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems,
+ params->elem_size,
+ params->page_size,
+ params->mode);
+
+ rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Cannot allocate a chain with the given arguments:\n");
+ DP_NOTICE(cdev,
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n",
+ params->intended_use, params->mode, params->cnt_type,
+ params->num_elems, params->elem_size,
+ params->page_size);
+ return rc;
+ }
+
+ qed_chain_init(chain, params, page_cnt);
+
+ switch (params->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ rc = qed_chain_alloc_next_ptr(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ rc = qed_chain_alloc_single(cdev, chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ rc = qed_chain_alloc_pbl(cdev, chain);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!rc)
+ return 0;
+
+ qed_chain_free(cdev, chain);
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
new file mode 100644
index 000000000..33f4f58ee
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -0,0 +1,2571 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_rdma.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+/* QM constants */
+#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
+
+/* Doorbell-Queue constants */
+#define DQ_RANGE_SHIFT 4
+#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN BIT(TM_SHIFT)
+#define TM_ELEM_SIZE 4
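+/* Timer ID counts are rounded up to TM_ALIGN (128); each ID occupies
+ * TM_ELEM_SIZE bytes in the TM ILT client.
+ */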
+
+#define ILT_DEFAULT_HW_P_SIZE 4
+
+#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
+#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
+
+/* ILT entry structure */
+#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
+#define ILT_ENTRY_PHY_ADDR_SHIFT 0
+#define ILT_ENTRY_VALID_MASK 0x1ULL
+#define ILT_ENTRY_VALID_SHIFT 52
+#define ILT_ENTRY_IN_REGS 2
+#define ILT_REG_SIZE_IN_BYTES 4
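+/* A 64-bit ILT entry is written to the runtime array as two 32-bit registers
+ * (ILT_ENTRY_IN_REGS). It carries the 4K-aligned physical address of the
+ * backing page (phys_addr >> 12) in its low bits and a valid bit at bit 52;
+ * see qed_ilt_init_pf().
+ */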
+
+/* connection context union */
+union conn_context {
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct fcoe_conn_context fcoe_ctx;
+ struct roce_conn_context roce_ctx;
+};
+
+/* TYPE-0 task context - iSCSI, FCOE */
+union type0_task_context {
+ struct iscsi_task_context iscsi_ctx;
+ struct fcoe_task_context fcoe_ctx;
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+ struct rdma_task_context roce_ctx;
+};
+
+struct src_ent {
+ __u8 opaque[56];
+ __be64 next;
+};
+
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
+
+#define CONN_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+
+#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context))
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
+static bool src_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_TCP_ULP ||
+ type == PROTOCOLID_FCOE ||
+ type == PROTOCOLID_IWARP;
+}
+
+static bool tm_cid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_TCP_ULP ||
+ type == PROTOCOLID_FCOE ||
+ type == PROTOCOLID_ROCE ||
+ type == PROTOCOLID_IWARP;
+}
+
+static bool tm_tid_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_FCOE;
+}
+
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_cdu_iids *iids)
+{
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct qed_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(i))
+ continue;
+
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+
+ /* Add the L2 filtering (arfs) filters in addition */
+ iids->pf_cids += p_mngr->arfs_count;
+}
+
+/* counts the iids for the Timers block configuration */
+struct qed_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_mngr *p_mngr,
+ struct qed_tm_iids *iids)
+{
+ bool tm_vf_required = false;
+ bool tm_required = false;
+ int i, j;
+
+ /* Timers is a special case -> we don't count how many cids require
+ * timers but rather the max cid that will be used by the timer block.
+ * Therefore we traverse in reverse order, and once we hit a protocol
+ * that requires the timers memory, we sum the cids of all the
+ * protocols up to and including that one.
+ */
+ for (i = MAX_CONN_TYPES - 1; i >= 0; i--) {
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i) || tm_required) {
+ if (p_cfg->cid_count)
+ tm_required = true;
+
+ iids->pf_cids += p_cfg->cid_count;
+ }
+
+ if (tm_cid_proto(i) || tm_vf_required) {
+ if (p_cfg->cids_per_vf)
+ tm_vf_required = true;
+
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+
+ if (tm_tid_proto(i)) {
+ struct qed_tid_seg *segs = p_cfg->tid_seg;
+
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->pf_tids[j] += segs[j].count;
+
+ /* The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+ }
+
+ iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
+}
+
+static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
+ struct qed_qm_iids *iids)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+ /* The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+
+ iids->vf_cids = vf_cids;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++)
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ return NULL;
+}
+
+static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn,
+ u32 num_srqs, u32 num_xrc_srqs)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+ p_mgr->xrc_srq_count = num_xrc_srqs;
+}
+
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client];
+
+ return ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+}
+
+static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn)
+{
+ u32 page_size;
+
+ page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM);
+ return page_size / XRC_SRQ_CXT_SIZE;
+}
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ u32 total_srqs;
+
+ total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count;
+
+ return total_srqs;
+}
+
+/* set the iids count per protocol */
+static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 cid_count, u32 vf_cid_cnt)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
+
+ p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+
+ if (type == PROTOCOLID_ROCE) {
+ u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
+ u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
+ u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ u32 align = elems_per_page * DQ_RANGE_ALIGN;
+
+ p_conn->cid_count = roundup(p_conn->cid_count, align);
+ }
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg,
+ u8 seg_type, u32 count, bool has_fl)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
+}
+
+static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 start_line, u32 total_size, u32 elem_size)
+{
+ u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ /* verify that it's called only once for each block */
+ if (p_blk->total_size)
+ return;
+
+ p_blk->total_size = total_size;
+ p_blk->real_size_in_page = 0;
+ if (elem_size)
+ p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
+ p_blk->start_line = start_line;
+}
+
+static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_client_cfg *p_cli,
+ struct qed_ilt_cli_blk *p_blk,
+ u32 *p_line, enum ilt_clients client_id)
+{
+ if (!p_blk->total_size)
+ return;
+
+ if (!p_cli->active)
+ p_cli->first.val = *p_line;
+
+ p_cli->active = true;
+ *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+ p_cli->last.val = *p_line - 1;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
+ client_id, p_cli->first.val,
+ p_cli->last.val, p_blk->total_size,
+ p_blk->real_size_in_page, p_blk->start_line);
+}
+
+static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct qed_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32) CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
+
+static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg
+ *p_cli)
+{
+ p_cli->active = false;
+ p_cli->first.val = 0;
+ p_cli->last.val = 0;
+ return p_cli;
+}
+
+static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk)
+{
+ p_blk->total_size = 0;
+ return p_blk;
+}
+
+static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 cli_idx, blk_idx;
+
+ for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) {
+ for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++)
+ clients[cli_idx].pf_blks[blk_idx].total_size = 0;
+
+ for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++)
+ clients[cli_idx].vf_blks[blk_idx].total_size = 0;
+ }
+}
+
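+/* Compute, client by client (CDUC, CDUT, QM, SRC, TM, TSDM), how many ILT
+ * lines the PF and its VFs require, starting at pf_start_line. Fails with
+ * -EINVAL if the total exceeds the ILT lines reserved for this PF.
+ */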
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ struct qed_cdu_iids cdu_iids;
+ struct qed_src_iids src_iids;
+ struct qed_qm_iids qm_iids;
+ struct qed_tm_iids tm_iids;
+ struct qed_tid_seg *p_seg;
+
+ memset(&qm_iids, 0, sizeof(qm_iids));
+ memset(&cdu_iids, 0, sizeof(cdu_iids));
+ memset(&src_iids, 0, sizeof(src_iids));
+ memset(&tm_iids, 0, sizeof(tm_iids));
+
+ p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
+
+ /* Reset all ILT blocks at the beginning of ILT computing in order
+ * to prevent memory allocation for irrelevant blocks afterwards.
+ */
+ qed_cxt_ilt_blk_reset(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
+ p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
+
+ /* CDUC */
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]);
+
+ curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
+ p_cli->pf_total_lines = 0;
+
+ /* get the counters for the CDUC and QM clients */
+ qed_cxt_cdu_iids(p_mngr, &cdu_iids);
+
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]);
+
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
+ /* CDUC VF */
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]);
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
+ /* CDUT PF */
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]);
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]);
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk =
+ qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]);
+
+ if (!p_seg->has_fl_mem) {
+ /* The segment is active (total size of 'working'
+ * memory is > 0) but has no FL (forced-load, Init)
+ * memory. Thus:
+ *
+ * 1. The total-size in the corresponding FL block of
+ * the ILT client is set to 0 - no ILT lines are
+ * provisioned and no ILT memory is allocated.
+ *
+ * 2. The start-line of said block is set to the
+ * start line of the matching working memory
+ * block in the ILT client. This is later used to
+ * configure the CDU segment offset registers so
+ * that an FL command for TIDs of this segment
+ * behaves as a regular load command (loading
+ * TIDs from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+ /* Strictly speaking we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]);
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk =
+ qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]);
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
+ /* QM */
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
+
+ qed_cxt_qm_iids(p_hwfn, &qm_iids);
+ total = qed_qm_pf_mem_size(qm_iids.cids,
+ qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids,
+ qm_iids.vf_cids,
+ qm_iids.tids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total * 0x1000,
+ QM_PQ_ELEMENT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ /* SRC */
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]);
+ qed_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VFs searcher connections are stored in the per PF
+ * database. Thus sum the PF searcher cids and all the VFs searcher
+ * cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = max_t(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = roundup_pow_of_two(local_max);
+
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]);
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]);
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = qed_cxt_get_total_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]);
+ p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]);
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
+ RESC_NUM(p_hwfn, QED_ILT))
+ return -EINVAL;
+
+ return 0;
+}
+
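+/* Translate a number of ILT lines beyond the PF's budget into the number of
+ * CDUT task contexts that would have to be dropped in order to fit, based on
+ * the element size of the first active PF task segment.
+ */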
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 excess_lines, available_lines;
+ struct qed_cxt_mngr *p_mngr;
+ u32 ilt_page_size, elem_size;
+ struct qed_tid_seg *p_seg;
+ int i;
+
+ available_lines = RESC_NUM(p_hwfn, QED_ILT);
+ excess_lines = used_lines - available_lines;
+
+ if (!excess_lines)
+ return 0;
+
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
+ return 0;
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
+
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ elem_size = p_mngr->task_type_size[p_seg->type];
+ if (!elem_size)
+ continue;
+
+ return (ilt_page_size / elem_size) * excess_lines;
+ }
+
+ DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n");
+ return 0;
+}
+
+static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2;
+ u32 i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return;
+
+ for (i = 0; i < p_t2->num_pages; i++)
+ if (p_t2->dma_mem[i].virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_t2->dma_mem[i].size,
+ p_t2->dma_mem[i].virt_addr,
+ p_t2->dma_mem[i].phys_addr);
+
+ kfree(p_t2->dma_mem);
+ p_t2->dma_mem = NULL;
+}
+
+static int
+qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn,
+ struct qed_src_t2 *p_t2, u32 total_size, u32 page_size)
+{
+ void **p_virt;
+ u32 size, i;
+
+ if (!p_t2 || !p_t2->dma_mem)
+ return -EINVAL;
+
+ for (i = 0; i < p_t2->num_pages; i++) {
+ size = min_t(u32, total_size, page_size);
+ p_virt = &p_t2->dma_mem[i].virt_addr;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_t2->dma_mem[i].phys_addr,
+ GFP_KERNEL);
+ if (!p_t2->dma_mem[i].virt_addr)
+ return -ENOMEM;
+
+ memset(*p_virt, 0, size);
+ p_t2->dma_mem[i].size = size;
+ total_size -= size;
+ }
+
+ return 0;
+}
+
+static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct phys_mem_desc *p_t2_last_page;
+ struct qed_ilt_client_cfg *p_src;
+ struct qed_src_iids src_iids;
+ struct qed_src_t2 *p_t2;
+ int rc;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+
+ /* If the SRC ILT client is inactive - there are no connections
+ * requiring the searcher, leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return 0;
+
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_t2 = &p_mngr->src_t2;
+ p_t2->num_pages = DIV_ROUND_UP(total_size, psz);
+
+ /* allocate t2 */
+ p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!p_t2->dma_mem) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+
+ rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz);
+ if (rc)
+ goto t2_fail;
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr;
+
+ p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page];
+ p_t2->last_free = (u64)p_t2_last_page->phys_addr +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
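+ /* Link the free list: within a page each entry's 'next' holds the
+ * physical address of the following entry, the last entry of a page
+ * points at the first entry of the next page, and the final entry is
+ * terminated with 0.
+ */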
+ for (i = 0; i < p_t2->num_pages; i++) {
+ u32 ent_num = min_t(u32,
+ ent_per_page,
+ conn_num);
+ struct src_ent *entries = p_t2->dma_mem[i].virt_addr;
+ u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = cpu_to_be64(val);
+ }
+
+ if (i < p_t2->num_pages - 1)
+ val = (u64)p_t2->dma_mem[i + 1].phys_addr;
+ else
+ val = 0;
+ entries[j].next = cpu_to_be64(val);
+
+ conn_num -= ent_num;
+ }
+
+ return 0;
+
+t2_fail:
+ qed_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
+
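+/* Iterate only over the active ILT clients. The '{ continue; } else' form
+ * keeps the macro usable as a plain for-loop header followed by a single
+ * statement or block.
+ */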
+#define for_each_ilt_valid_client(pos, clients) \
+ for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
+
+/* Total number of ILT lines used by this PF */
+static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
+{
+ u32 size = 0;
+ u32 i;
+
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
+
+ return size;
+}
+
+static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 ilt_size, i;
+
+ ilt_size = qed_cxt_ilt_shadow_size(p_cli);
+
+ for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
+ struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i];
+
+ if (p_dma->virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_dma->size, p_dma->virt_addr,
+ p_dma->phys_addr);
+ p_dma->virt_addr = NULL;
+ }
+ kfree(p_mngr->ilt_shadow);
+ p_mngr->ilt_shadow = NULL;
+}
+
+static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ilt_cli_blk *p_blk,
+ enum ilt_clients ilt_client,
+ u32 start_line_offset)
+{
+ struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) &&
+ ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
+ return 0;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
+
+ if (!p_blk->total_size)
+ return 0;
+
+ sz_left = p_blk->total_size;
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
+ line = p_blk->start_line + start_line_offset -
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
+
+ for (; lines; lines--) {
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 size;
+
+ size = min_t(u32, sz_left, p_blk->real_size_in_page);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt)
+ return -ENOMEM;
+
+ ilt_shadow[line].phys_addr = p_phys;
+ ilt_shadow[line].virt_addr = p_virt;
+ ilt_shadow[line].size = size;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
+ line, (u64)p_phys, p_virt, size);
+
+ sz_left -= size;
+ line++;
+ }
+
+ return 0;
+}
+
+static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *clients = p_mngr->clients;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 size, i, j, k;
+ int rc;
+
+ size = qed_cxt_ilt_shadow_size(clients);
+ p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!p_mngr->ilt_shadow) {
+ rc = -ENOMEM;
+ goto ilt_shadow_fail;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Allocated 0x%x bytes for ilt shadow\n",
+ (u32)(size * sizeof(struct phys_mem_desc)));
+
+ for_each_ilt_valid_client(i, clients) {
+ for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
+ p_blk = &clients[i].pf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
+ if (rc)
+ goto ilt_shadow_fail;
+ }
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+ if (rc)
+ goto ilt_shadow_fail;
+ }
+ }
+ }
+
+ return 0;
+
+ilt_shadow_fail:
+ qed_ilt_shadow_free(p_hwfn);
+ return rc;
+}
+
+static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 type, vf;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ bitmap_free(p_mngr->acquired[type].cid_map);
+ p_mngr->acquired[type].max_count = 0;
+ p_mngr->acquired[type].start_cid = 0;
+
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ bitmap_free(p_mngr->acquired_vf[type][vf].cid_map);
+ p_mngr->acquired_vf[type][vf].max_count = 0;
+ p_mngr->acquired_vf[type][vf].start_cid = 0;
+ }
+ }
+}
+
+static int
+qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
+ u32 type,
+ u32 cid_start,
+ u32 cid_count, struct qed_cid_acquired_map *p_map)
+{
+ if (!cid_count)
+ return 0;
+
+ p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL);
+ if (!p_map->cid_map)
+ return -ENOMEM;
+
+ p_map->max_count = cid_count;
+ p_map->start_cid = cid_start;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Type %08x start: %08x count %08x\n",
+ type, p_map->start_cid, p_map->max_count);
+
+ return 0;
+}
+
+static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 start_cid = 0, vf_start_cid = 0;
+ u32 type, vf;
+
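+ /* CID ranges are laid out back to back: each connection type's PF
+ * range starts where the previous type's range ends, and likewise for
+ * the per-VF ranges.
+ */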
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+ struct qed_cid_acquired_map *p_map;
+
+ /* Handle PF maps */
+ p_map = &p_mngr->acquired[type];
+ if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
+ p_cfg->cid_count, p_map))
+ goto cid_map_fail;
+
+ /* Handle VF maps */
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ if (qed_cid_map_alloc_single(p_hwfn, type,
+ vf_start_cid,
+ p_cfg->cids_per_vf, p_map))
+ goto cid_map_fail;
+ }
+
+ start_cid += p_cfg->cid_count;
+ vf_start_cid += p_cfg->cids_per_vf;
+ }
+
+ return 0;
+
+cid_map_fail:
+ qed_cid_map_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients;
+ struct qed_cxt_mngr *p_mngr;
+ u32 i;
+
+ p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL);
+ if (!p_mngr)
+ return -ENOMEM;
+
+ /* Initialize ILT client registers */
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
+ /* default ILT page size for all clients is 64K */
+ for (i = 0; i < MAX_ILT_CLIENTS; i++)
+ p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+
+ p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn);
+
+ /* Initialize task sizes */
+ p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
+ p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
+
+ if (p_hwfn->cdev->p_iov_info) {
+ p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ p_mngr->first_vf_in_pf =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ }
+ /* Initialize the dynamic ILT allocation mutex */
+ mutex_init(&p_mngr->mutex);
+
+ /* Set the cxt manager pointer prior to further allocations */
+ p_hwfn->p_cxt_mngr = p_mngr;
+
+ return 0;
+}
+
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate the ILT shadow table */
+ rc = qed_ilt_shadow_alloc(p_hwfn);
+ if (rc)
+ goto tables_alloc_fail;
+
+ /* Allocate the T2 table */
+ rc = qed_cxt_src_t2_alloc(p_hwfn);
+ if (rc)
+ goto tables_alloc_fail;
+
+ /* Allocate and initialize the acquired cids bitmaps */
+ rc = qed_cid_map_alloc(p_hwfn);
+ if (rc)
+ goto tables_alloc_fail;
+
+ return 0;
+
+tables_alloc_fail:
+ qed_cxt_mngr_free(p_hwfn);
+ return rc;
+}
+
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_cxt_mngr)
+ return;
+
+ qed_cid_map_free(p_hwfn);
+ qed_cxt_src_t2_free(p_hwfn);
+ qed_ilt_shadow_free(p_hwfn);
+ kfree(p_hwfn->p_cxt_mngr);
+
+ p_hwfn->p_cxt_mngr = NULL;
+}
+
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ struct qed_conn_type_cfg *p_cfg;
+ int type;
+
+ /* Reset acquired cids */
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ u32 vf;
+
+ p_cfg = &p_mngr->conn_cfg[type];
+ if (p_cfg->cid_count) {
+ p_map = &p_mngr->acquired[type];
+ bitmap_zero(p_map->cid_map, p_map->max_count);
+ }
+
+ if (!p_cfg->cids_per_vf)
+ continue;
+
+ for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+ p_map = &p_mngr->acquired_vf[type][vf];
+ bitmap_zero(p_map->cid_map, p_map->max_count);
+ }
+ }
+}
+
+/* CDU Common */
+#define CDUC_CXT_SIZE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
+
+#define CDUC_CXT_SIZE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
+
+#define CDUC_BLOCK_WASTE_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
+
+#define CDUC_BLOCK_WASTE_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
+
+#define CDUC_NCIB_SHIFT \
+ CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
+
+#define CDUC_NCIB_MASK \
+ (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
+static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
+{
+ u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
+
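+ /* For the CDUC connection contexts and the CDUT type-0/type-1 task
+ * contexts, program the context size, the number of contexts per ILT
+ * page, and the bytes left unused (block waste) at the end of each
+ * page once it is filled with whole contexts.
+ */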
+ /* CDUC - connection configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+ cxt_size = CONN_CXT_SIZE(p_hwfn);
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
+ SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
+ SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* There are initializations only for CDUT during the PF phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Segment 0 */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume ILT
+ * Page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
+}
+
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_pf_rt_init_params params;
+ struct qed_qm_iids iids;
+
+ memset(&iids, 0, sizeof(iids));
+ qed_cxt_qm_iids(p_hwfn, &iids);
+
+ memset(&params, 0, sizeof(params));
+ params.port_id = p_hwfn->port_id;
+ params.pf_id = p_hwfn->rel_pf_id;
+ params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params.is_pf_loading = is_pf_loading;
+ params.num_pf_cids = iids.cids;
+ params.num_vf_cids = iids.vf_cids;
+ params.num_tids = iids.tids;
+ params.start_pq = qm_info->start_pq;
+ params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ params.num_vf_pqs = qm_info->num_vf_pqs;
+ params.start_vport = qm_info->start_vport;
+ params.num_vports = qm_info->num_vports;
+ params.pf_wfq = qm_info->pf_wfq;
+ params.pf_rl = qm_info->pf_rl;
+ params.pq_params = qm_info->qm_pq_params;
+ params.vport_params = qm_info->qm_vport_params;
+
+ qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
+}
+
+/* CM PF */
+static void qed_cm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ /* XCM pure-LB queue */
+ STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
+ qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
+}
+
+/* DQ PF */
+static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
+
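+ /* The DORQ max-ICID registers hold running totals: register N covers
+ * the accumulated cid count of connection types 0..N, expressed in
+ * DQ_RANGE_ALIGN units (>> DQ_RANGE_SHIFT).
+ */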
+ dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
+ dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must still be
+ * configured with the highest possible ICID. If they were left at
+ * their defaults, a bug could occur with a large number of cids
+ * whenever those defaults end up smaller than dq_pf_max_cid /
+ * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
+}
+
+static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *ilt_clients;
+ int i;
+
+ ilt_clients = p_hwfn->p_cxt_mngr->clients;
+ for_each_ilt_valid_client(i, ilt_clients) {
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].first.reg,
+ ilt_clients[i].first.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
+ STORE_RT_REG(p_hwfn,
+ ilt_clients[i].p_size.reg,
+ ilt_clients[i].p_size.val);
+ }
+}
+
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+ /* For simplicity we set the 'block' to be an ILT page */
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
+/* ILT (PSWRQ2) PF */
+static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *clients;
+ struct qed_cxt_mngr *p_mngr;
+ struct phys_mem_desc *p_shdw;
+ u32 line, rt_offst, i;
+
+ qed_ilt_bounds_init(p_hwfn);
+ qed_ilt_vf_bounds_init(p_hwfn);
+
+ p_mngr = p_hwfn->p_cxt_mngr;
+ p_shdw = p_mngr->ilt_shadow;
+ clients = p_hwfn->p_cxt_mngr->clients;
+
+ for_each_ilt_valid_client(i, clients) {
+ /* The client's first val and the RT array are absolute, while the
+ * ILT shadow lines are relative.
+ */
+ line = clients[i].first.val - p_mngr->pf_start_line;
+ rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
+ clients[i].first.val * ILT_ENTRY_IN_REGS;
+
+ for (; line <= clients[i].last.val - p_mngr->pf_start_line;
+ line++, rt_offst += ILT_ENTRY_IN_REGS) {
+ u64 ilt_hw_entry = 0;
+
+ /* p_virt could be NULL in case of dynamic
+ * allocation.
+ */
+ if (p_shdw[line].virt_addr) {
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_shdw[line].phys_addr >> 12));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
+ rt_offst, line, i,
+ (u64)(p_shdw[line].phys_addr >> 12));
+ }
+
+ STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
+ }
+ }
+}
+
+/* SRC (Searcher) PF */
+static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct qed_src_iids src_iids;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = roundup_pow_of_two(conn_max);
+
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ ilog2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->src_t2.first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->src_t2.last_free);
+}
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
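+/* Each 64-bit timer configuration word is written to the runtime array as two
+ * consecutive 32-bit registers, hence the sizeof(cfg_word) / sizeof(u32)
+ * stride used below.
+ */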
+static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct qed_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ memset(&tm_iids, 0, sizeof(tm_iids));
+ qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enable scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn))
+ active_seg_mask = 0;
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
+
+static void qed_prs_init_common(struct qed_hwfn *p_hwfn)
+{
+ if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) &&
+ p_hwfn->pf_params.fcoe_pf_params.is_target)
+ STORE_RT_REG(p_hwfn,
+ PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0);
+}
+
+static void qed_prs_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_conn_type_cfg *p_fcoe;
+ struct qed_tid_seg *p_tid;
+
+ p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
+
+ /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
+ if (!p_fcoe->cid_count)
+ return;
+
+ p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG];
+ if (p_hwfn->pf_params.fcoe_pf_params.is_target) {
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET,
+ p_tid->count);
+ } else {
+ STORE_RT_REG_AGG(p_hwfn,
+ PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
+ p_tid->count);
+ }
+}
+
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
+{
+ qed_cdu_init_common(p_hwfn);
+ qed_prs_init_common(p_hwfn);
+}
+
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_qm_init_pf(p_hwfn, p_ptt, true);
+ qed_cm_init_pf(p_hwfn);
+ qed_dq_init_pf(p_hwfn);
+ qed_cdu_init_pf(p_hwfn);
+ qed_ilt_init_pf(p_hwfn);
+ qed_src_init_pf(p_hwfn);
+ qed_tm_init_pf(p_hwfn);
+ qed_prs_init_pf(p_hwfn);
+}
+
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map;
+ u32 rel_cid;
+
+ if (type >= MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
+ DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
+ return -EINVAL;
+ }
+
+ /* Determine the right map to take this CID from */
+ if (vfid == QED_CXT_PF_CID)
+ p_map = &p_mngr->acquired[type];
+ else
+ p_map = &p_mngr->acquired_vf[type][vfid];
+
+ if (!p_map->cid_map) {
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+ return -EINVAL;
+ }
+
+ rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
+
+ if (rel_cid >= p_map->max_count) {
+ DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
+ return -EINVAL;
+ }
+
+ __set_bit(rel_cid, p_map->cid_map);
+
+ *p_cid = rel_cid + p_map->start_cid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+ *p_cid, rel_cid, vfid, type);
+
+ return 0;
+}
+
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid)
+{
+ return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
+}
+
+static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
+ u32 cid,
+ u8 vfid,
+ enum protocol_type *p_type,
+ struct qed_cid_acquired_map **pp_map)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rel_cid;
+
+ /* Iterate over protocols and find matching cid range */
+ for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+ if (vfid == QED_CXT_PF_CID)
+ *pp_map = &p_mngr->acquired[*p_type];
+ else
+ *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
+
+ if (!((*pp_map)->cid_map))
+ continue;
+ if (cid >= (*pp_map)->start_cid &&
+ cid < (*pp_map)->start_cid + (*pp_map)->max_count)
+ break;
+ }
+
+ if (*p_type == MAX_CONN_TYPES) {
+ DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
+ goto fail;
+ }
+
+ rel_cid = cid - (*pp_map)->start_cid;
+ if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
+ DP_NOTICE(p_hwfn, "CID %d [vifd %02x] not acquired",
+ cid, vfid);
+ goto fail;
+ }
+
+ return true;
+fail:
+ *p_type = MAX_CONN_TYPES;
+ *pp_map = NULL;
+ return false;
+}
+
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
+{
+ struct qed_cid_acquired_map *p_map = NULL;
+ enum protocol_type type;
+ bool b_acquired;
+ u32 rel_cid;
+
+ if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
+ DP_NOTICE(p_hwfn,
+ "Trying to return incorrect CID belonging to VF %02x\n",
+ vfid);
+ return;
+ }
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+ &type, &p_map);
+
+ if (!b_acquired)
+ return;
+
+ rel_cid = cid - p_map->start_cid;
+ clear_bit(rel_cid, p_map->cid_map);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+ "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+ cid, rel_cid, vfid, type);
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
+}
+
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_cid_acquired_map *p_map = NULL;
+ u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
+ enum protocol_type type;
+ bool b_acquired;
+
+ /* Test acquired and find matching per-protocol map */
+ b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+ QED_CXT_PF_CID, &type, &p_map);
+
+ if (!b_acquired)
+ return -EINVAL;
+
+ /* set the protocol type */
+ p_info->type = type;
+
+ /* compute context virtual pointer */
+ hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
+
+ conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
+ cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
+ line = p_info->iid / cxts_per_p;
+
+ /* Make sure context is allocated (dynamic allocation) */
+ if (!p_mngr->ilt_shadow[line].virt_addr)
+ return -EINVAL;
+
+ p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr +
+ p_info->iid % cxts_per_p * conn_cxt_size;
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
+ "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
+ p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);
+
+ return 0;
+}
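+
+/* Illustrative sketch (not part of the driver): the iid-to-context-pointer
+ * mapping above is plain arithmetic. Assuming, hypothetically, an ILT page of
+ * 4096 bytes and a connection context of 512 bytes, cxts_per_p is 8, so
+ * iid 21 resolves to ilt_shadow line 21 / 8 = 2, at byte offset
+ * (21 % 8) * 512 = 2560 within that page.
+ */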
+
+static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params,
+ u32 num_tasks)
+{
+ u32 num_cons, num_qps;
+ enum protocol_type proto;
+
+ if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+ p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
+ }
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_IWARP:
+ /* Each QP requires one connection */
+ num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps);
+ proto = PROTOCOLID_IWARP;
+ break;
+ case QED_PCI_ETH_ROCE:
+ num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
+ num_cons = num_qps * 2; /* each QP requires two connections */
+ proto = PROTOCOLID_ROCE;
+ break;
+ default:
+ return;
+ }
+
+ if (num_cons && num_tasks) {
+ u32 num_srqs, num_xrc_srqs;
+
+ qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
+
+ /* Deliberately passing ROCE for the task id. This is because
+ * iWARP / RoCE share the task id.
+ */
+ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
+ QED_CXT_ROCE_TID_SEG, 1,
+ num_tasks, false);
+
+ num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs);
+
+ /* XRC SRQs populate a single ILT page */
+ num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn);
+
+ qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "RDMA personality used without setting params!\n");
+ }
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks)
+{
+ /* Set the number of required CORE connections */
+ u32 core_cids = 1; /* SPQ */
+
+ if (p_hwfn->using_ll2)
+ core_cids += 4;
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_RDMA:
+ case QED_PCI_ETH_IWARP:
+ case QED_PCI_ETH_ROCE:
+ {
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params,
+ rdma_tasks);
+ /* no need for break since RoCE coexists with Ethernet */
+ }
+ fallthrough;
+ case QED_PCI_ETH:
+ {
+ struct qed_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ if (!p_params->num_vf_cons)
+ p_params->num_vf_cons =
+ ETH_PF_PARAMS_VF_CONS_DEFAULT;
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons,
+ p_params->num_vf_cons);
+ p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters;
+ break;
+ }
+ case QED_PCI_FCOE:
+ {
+ struct qed_fcoe_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.fcoe_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_FCOE,
+ p_params->num_cons,
+ 0);
+ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE,
+ QED_CXT_FCOE_TID_SEG, 0,
+ p_params->num_tasks, true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "Fcoe personality used without setting params!\n");
+ }
+ break;
+ }
+ case QED_PCI_ISCSI:
+ {
+ struct qed_iscsi_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ p_params->num_cons,
+ 0);
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "Iscsi personality used without setting params!\n");
+ }
+ break;
+ }
+ case QED_PCI_NVMETCP:
+ {
+ struct qed_nvmetcp_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ p_params->num_cons,
+ 0);
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ QED_CXT_TCP_ULP_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "NvmeTCP personality used without setting params!\n");
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
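+
+/* Illustrative sketch (not part of the driver): a hypothetical caller is
+ * expected to populate p_hwfn->pf_params for its personality before the
+ * routine above runs. For an Ethernet PF that could look like (all values
+ * made up for illustration):
+ *
+ *	struct qed_eth_pf_params *eth = &p_hwfn->pf_params.eth_pf_params;
+ *	int rc;
+ *
+ *	eth->num_cons = 64;
+ *	eth->num_vf_cons = 32;
+ *	eth->num_arfs_filters = 256;
+ *	rc = qed_cxt_set_pf_params(p_hwfn, 0);
+ */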
+
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 proto, seg, total_lines, i, shadow_line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_fl_seg;
+ struct qed_tid_seg *p_seg_info;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_FCOE:
+ proto = PROTOCOLID_FCOE;
+ seg = QED_CXT_FCOE_TID_SEG;
+ break;
+ case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+
+ p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+ p_fl_seg->real_size_in_page);
+
+ for (i = 0; i < total_lines; i++) {
+ shadow_line = i + p_fl_seg->start_line -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr;
+ }
+ p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+ p_fl_seg->real_size_in_page;
+ p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+ p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+ p_info->tid_size;
+
+ return 0;
+}
+
+/* This function is very RoCE oriented; if another protocol wants this feature
+ * in the future, the function will need to be made more generic.
+ */
+int
+qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct tdif_task_context *tdif_context;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ struct qed_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ u32 flags1;
+ int rc = 0;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ /* The first ILT page is not used for regular SRQs. Skip it. */
+ iid += p_hwfn->p_cxt_mngr->xrc_srq_count;
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_XRC_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = XRC_SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type);
+ return -EOPNOTSUPP;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ /* If line is already allocated, do nothing, otherwise allocate it and
+ * write it to the PSWRQ2 registers.
+ * This section can be run in parallel from different contexts and thus
+ * a mutex protection is needed.
+ */
+
+ mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr)
+ goto out0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = -EBUSY;
+ goto out0;
+ }
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page, &p_phys,
+ GFP_KERNEL);
+ if (!p_virt) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+
+ /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
+ * to compensate for a HW bug, but it is configured even if DIF is not
+ * enabled. This is harmless and allows us to avoid a dedicated API. We
+ * configure the field for all of the contexts on the newly allocated
+ * page.
+ */
+ if (elem_type == QED_ELEM_TASK) {
+ u32 elem_i;
+ u8 *elem_start = (u8 *)p_virt;
+ union type1_task_context *elem;
+
+ for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
+ elem = (union type1_task_context *)elem_start;
+ tdif_context = &elem->roce_ctx.tdif_context;
+
+ flags1 = le32_to_cpu(tdif_context->flags1);
+ SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
+ tdif_context->flags1 = cpu_to_le32(flags1);
+
+ elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
+ }
+ }
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr
+ >> 12));
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
+ NULL);
+
+ if (elem_type == QED_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RDMA search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ qed_ptt_release(p_hwfn, p_ptt);
+out0:
+ mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
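+
+/* Illustrative sketch (not part of the driver): the ILT entry written above
+ * stores the page-aligned DMA address in 4 KB units (hence the ">> 12"), so a
+ * hypothetical page at physical address 0x123456000 would be programmed as
+ * 0x123456 with ILT_ENTRY_VALID set.
+ */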
+
+/* This function is very RoCE oriented; if another protocol wants this feature
+ * in the future, the function will need to be made more generic.
+ */
+static int
+qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct qed_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_XRC_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = XRC_SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ return -EBUSY;
+ }
+
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr)
+ continue;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus.
+ */
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64) (uintptr_t) &ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ NULL);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
+{
+ int rc;
+ u32 cid;
+
+ /* Free Connection CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ proto),
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ proto, &cid));
+
+ if (rc)
+ return rc;
+
+ /* Free Task CXT (intentionally RoCE, as the task-id is shared between
+ * RoCE and iWARP)
+ */
+ proto = PROTOCOLID_ROCE;
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
+ qed_cxt_get_proto_tid_count(p_hwfn, proto));
+ if (rc)
+ return rc;
+
+ /* Free TSDM CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0,
+ p_hwfn->p_cxt_mngr->xrc_srq_count);
+
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ,
+ p_hwfn->p_cxt_mngr->xrc_srq_count,
+ p_hwfn->p_cxt_mngr->srq_count);
+
+ return rc;
+}
+
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+ u32 tid, u8 ctx_type, void **pp_task_ctx)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_tid_seg *p_seg_info;
+ struct qed_ilt_cli_blk *p_seg;
+ u32 num_tids_per_block;
+ u32 tid_size, ilt_idx;
+ u32 total_lines;
+ u32 proto, seg;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_FCOE:
+ proto = PROTOCOLID_FCOE;
+ seg = QED_CXT_FCOE_TID_SEG;
+ break;
+ case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ seg = QED_CXT_TCP_ULP_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ if (ctx_type == QED_CTX_WORKING_MEM) {
+ p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+ } else if (ctx_type == QED_CTX_FL_MEM) {
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+ p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ } else {
+ return -EINVAL;
+ }
+ total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+ tid_size = p_mngr->task_type_size[p_seg_info->type];
+ num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+ if (total_lines < tid / num_tids_per_block)
+ return -EINVAL;
+
+ ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+ p_mngr->pf_start_line;
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr +
+ (tid % num_tids_per_block) * tid_size;
+
+ return 0;
+}
+
+static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk)
+{
+ if (p_blk->real_size_in_page == 0)
+ return 0;
+
+ return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
+}
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 i, pages = 0;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
+
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u16 pages = 0, i;
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)];
+ pages += qed_blk_calculate_pages(p_blk);
+ }
+
+ return pages;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
new file mode 100644
index 000000000..168ce2c50
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_CXT_H
+#define _QED_CXT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include "qed_hsi.h"
+#include "qed.h"
+
+struct qed_cxt_info {
+ void *p_cxt;
+ u32 iid;
+ enum protocol_type type;
+};
+
+#define MAX_TID_BLOCKS 512
+struct qed_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
+/**
+ * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
+ *
+ * @p_hwfn: HW device data.
+ * @p_info: In/out.
+ *
+ * Return: Int.
+ */
+int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
+ struct qed_cxt_info *p_info);
+
+/**
+ * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
+ *
+ * @p_hwfn: HW device data.
+ * @p_info: in/out.
+ *
+ * Return: int.
+ */
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info);
+
+#define QED_CXT_TCP_ULP_TID_SEG PROTOCOLID_TCP_ULP
+#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
+#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE
+enum qed_cxt_elem_type {
+ QED_ELEM_CXT,
+ QED_ELEM_SRQ,
+ QED_ELEM_TASK,
+ QED_ELEM_XRC_SRQ,
+};
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
+/**
+ * qed_cxt_set_pf_params(): Set the PF params for cxt init.
+ *
+ * @p_hwfn: HW device data.
+ * @rdma_tasks: Requested maximum.
+ *
+ * Return: int.
+ */
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
+
+/**
+ * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
+ *
+ * @p_hwfn: HW device data.
+ * @last_line: (out) Last ILT line of the computed configuration.
+ *
+ * Return: Int
+ */
+int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
+
+/**
+ * qed_cxt_cfg_ilt_compute_excess(): Returns how many ILT lines can be freed.
+ *
+ * @p_hwfn: HW device data.
+ * @used_lines: Used lines.
+ *
+ * Return: Int.
+ */
+u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
+
+/**
+ * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_cxt_mngr_free() - Context manager free.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_cxt_mngr_setup(): Reset the acquired CIDs.
+ *
+ * @p_hwfn: HW device data.
+ */
+void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ */
+void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_qm_init_pf(): Initialize the QM PF phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @is_pf_loading: Indicates whether the PF is currently loading.
+ *
+ * Return: Void.
+ */
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool is_pf_loading);
+
+/**
+ * qed_qm_reconf(): Reconfigures QM pf on the fly.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+#define QED_CXT_PF_CID (0xff)
+
+/**
+ * qed_cxt_release_cid(): Release a cid.
+ *
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ *
+ * Return: Void.
+ */
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
+
+/**
+ * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
+ *
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ * @vfid: Engine-relative VF index, or QED_CXT_PF_CID if the CID belongs to the PF.
+ *
+ * Return: Void.
+ */
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
+
+/**
+ * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
+ *
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
+ *
+ * Return: Int.
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid);
+
+/**
+ * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
+ * for a vf-queue.
+ *
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
+ * @vfid: Engine-relative VF index, or QED_CXT_PF_CID if the CID belongs to the PF.
+ *
+ * Return: Int.
+ */
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *p_cid, u8 vfid);
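+
+/* Illustrative sketch (not part of the driver): the acquire/release calls
+ * above are meant to be paired. A hypothetical caller creating a PF-owned L2
+ * connection might do:
+ *
+ *	u32 cid;
+ *	int rc;
+ *
+ *	rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid);
+ *	if (rc)
+ *		return rc;
+ *	... use cid for the new connection ...
+ *	qed_cxt_release_cid(p_hwfn, cid);
+ */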
+
+int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid);
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type);
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
+
+#define QED_CTX_WORKING_MEM 0
+#define QED_CTX_FL_MEM 1
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+ u32 tid, u8 ctx_type, void **task_ctx);
+
+/* Max number of connection types in HW (DQ/CDU etc.) */
+#define MAX_CONN_TYPES PROTOCOLID_COMMON
+#define NUM_TASK_TYPES 2
+#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
+
+/* PF per-protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
+struct qed_conn_type_cfg {
+ u32 cid_count;
+ u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
+};
+
+/* ILT Client configuration,
+ * Per connection type (protocol) resources (cids, tis, vf cids etc.).
+ * 1 block for connection context (CDUC), and for each task context we need
+ * two blocks - one for the regular task context and one for force-load memory.
+ */
+#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
+#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
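+
+/* Illustrative sketch (not part of the driver): with NUM_TASK_PF_SEGMENTS == 4
+ * the PF block indices expand to CDUC_BLK == 0, CDUT_SEG_BLK(0..3) == 1..4 and
+ * CDUT_FL_SEG_BLK(0..3, PF) == 5..8, which is why ILT_CLI_PF_BLOCKS is
+ * 1 + 4 * 2 == 9.
+ */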
+
+struct ilt_cfg_pair {
+ u32 reg;
+ u32 val;
+};
+
+struct qed_ilt_cli_blk {
+ u32 total_size; /* 0 means not active */
+ u32 real_size_in_page;
+ u32 start_line;
+ u32 dynamic_line_offset;
+ u32 dynamic_line_cnt;
+};
+
+struct qed_ilt_client_cfg {
+ bool active;
+
+ /* ILT boundaries */
+ struct ilt_cfg_pair first;
+ struct ilt_cfg_pair last;
+ struct ilt_cfg_pair p_size;
+
+ /* ILT client blocks for PF */
+ struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
+ u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
+};
+
+struct qed_cid_acquired_map {
+ u32 start_cid;
+ u32 max_count;
+ unsigned long *cid_map;
+};
+
+struct qed_src_t2 {
+ struct phys_mem_desc *dma_mem;
+ u32 num_pages;
+ u64 first_free;
+ u64 last_free;
+};
+
+struct qed_cxt_mngr {
+ /* Per-protocol configuration */
+ struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
+
+ /* computed ILT structure */
+ struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
+
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+ u32 first_vf_in_pf;
+
+ /* Acquired CIDs */
+ struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
+
+ struct qed_cid_acquired_map
+ acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
+ /* ILT shadow table */
+ struct phys_mem_desc *ilt_shadow;
+ u32 ilt_shadow_size;
+ u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_src_t2 src_t2;
+
+ /* total number of SRQs for this hwfn */
+ u32 srq_count;
+ u32 xrc_srq_count;
+
+ /* Maximal number of L2 steering filters */
+ u32 arfs_count;
+
+ u16 iscsi_task_pages;
+ u16 fcoe_task_pages;
+ u16 roce_task_pages;
+ u16 eth_task_pages;
+ u16 task_ctx_size;
+ u16 conn_ctx_size;
+};
+
+u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
+u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
+
+u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client);
+
+u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
new file mode 100644
index 000000000..f6cd1b3ef
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+#ifndef _QED_DBG_HSI_H
+#define _QED_DBG_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_MSTAT,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
+ BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
+ MAX_BLOCK_ID
+};
+
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ u16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
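+
+/* Illustrative sketch (not part of the driver): the MASK/SHIFT pairs above
+ * (and throughout this header) describe packed fields, so a hypothetical
+ * reader extracts a field with a plain shift-and-mask, e.g.:
+ *
+ *	u16 val = (mapping.data >> DBG_ATTN_BIT_MAPPING_VAL_SHIFT) &
+ *		  DBG_ATTN_BIT_MAPPING_VAL_MASK;
+ */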
+
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ u16 names_offset;
+ u16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ u16 regs_offset;
+};
+
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+/* Attention register result */
+struct dbg_attn_reg_result {
+ u32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val;
+ u32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ u16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* Mode header */
+struct dbg_mode_hdr {
+ u16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ u16 block_attn_offset;
+ u32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+ u32 sts_clr_address;
+ u32 mask_address;
+};
+
+/* Attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
+};
+
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
+ u8 has_latency_events;
+ u16 names_offset;
+};
+
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
+/* Block Debug line data */
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
+/* Condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* Memory data for registers dump */
+struct dbg_dump_mem {
+ u32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ u32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+/* Register data for registers dump */
+struct dbg_dump_reg {
+ u32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* Split header for registers dump */
+struct dbg_dump_split_hdr {
+ u32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* Condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ u32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ u16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ u32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ u16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ u16 rule_id; /* Failing rule index */
+ u16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+ u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ u16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ u16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+ u16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+ u16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* Idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+ /* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
+ u8 line_num;
+ u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
+};
+
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus constraint operation types */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/* Debug Bus trigger state data */
+struct dbg_bus_trigger_state_data {
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ u32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+ u8 mask; /* Event ID mask. 1s in the mask = don't-care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 enabled;
+ u8 mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ u32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ u32 app_version;
+ u8 state;
+ u8 mode_256b_en;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
+ u8 trigger_en;
+ u8 filter_constraint_dword_mask;
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
+ u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[132];
+ struct dbg_bus_storm_data storms[6];
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug Bus Storm modes */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG,
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ u8 params_initialized;
+ u8 reserved1;
+ u16 reserved2;
+ u32 param_val[48];
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_DUMP_DORQ,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_RESERVD1,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ DBG_GRC_PARAM_CRASH,
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_RESERVED0,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_RESERVED1,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
+ MAX_DBG_STATUS
+};
+
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ u32 buf_size;
+ u8 buf_size_set;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+struct pretend_params {
+ u8 split_type;
+ u8 reserved;
+ u16 split_id;
+};
+
+/* Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[132];
+ u8 chip_id;
+ u8 hw_type;
+ u8 num_ports;
+ u8 num_pfs_per_port;
+ u8 num_vfs;
+ u8 initialized;
+ u8 use_dmae;
+ u8 reserved;
+ struct pretend_params pretend;
+ u32 num_regs_read;
+};
+
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: A pointer to the binary data with debug arrays.
+ *
+ * Return: enum dbg status.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_read_regs(): Reads registers into a buffer (using GRC).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf: Destination buffer.
+ * @addr: Source GRC address in dwords.
+ * @len: Number of registers to read.
+ *
+ * Return: Void.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
+ * qed_read_fw_info(): Reads FW info from the chip.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_info: (Out) a pointer to write the FW info into.
+ *
+ * Return: True if the FW info was read successfully from one of the Storms,
+ * or false if all Storms are in reset.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+ */
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * qed_dbg_grc_config(): Sets the value of a GRC parameter.
+ *
+ * @p_hwfn: HW device data.
+ * @grc_param: GRC parameter.
+ * @val: Value to set.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - Grc_param is invalid.
+ * - Val is outside the allowed boundaries.
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val);
+
+/**
+ * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their
+ * default value.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
+ * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the collected GRC data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified dump buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
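+
+/* Illustrative sketch (not part of the driver): the dump APIs in this header
+ * follow a query-size-then-dump pattern. A hypothetical GRC dump caller:
+ *
+ *	u32 size_dw, dumped_dw;
+ *	u32 *buf;
+ *
+ *	if (qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dw) !=
+ *	    DBG_STATUS_OK)
+ *		return;
+ *	buf = vzalloc(size_dw * sizeof(u32));
+ *	if (buf &&
+ *	    qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dw,
+ *			     &dumped_dw) == DBG_STATUS_OK)
+ *		... consume dumped_dw dwords of buf ...
+ *	vfree(buf);
+ */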
+
+/**
+ * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size
+ * for idle check results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_idle_chk_dump(): Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the idle check data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The trace data in MCP scratchpad contain an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the mcp trace data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - The trace data in MCP scratchpad contain an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * - The trace meta data cannot be read (from NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for grc trace fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the reg fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the IGU fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * - The specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_protection_override_get_dump_buf_size(): Returns the required
+ * buffer size for protection override window results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for protection
+ * override data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_protection_override_dump(): Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the protection override data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the FW Asserts data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_read_attn(): Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @block: Block ID.
+ * @attn_type: Attention type.
+ * @clear_status: Indicates if the attention status should be cleared.
+ * @results: (OUT) Pointer to write the read results into.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
+/**
+ * qed_dbg_print_attn(): Prints the attention register values in the
+ * specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
+ char *format_str;
+};
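+
+/* Field access example (illustrative only, not part of the interface): the
+ * masks/offsets above are used with the usual mask-and-shift pattern, e.g.
+ *
+ *   u8 level = (fmt->data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
+ *              MCP_TRACE_FORMAT_LEVEL_OFFSET;
+ *
+ * where 'fmt' is a hypothetical pointer to a struct mcp_trace_format entry.
+ */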
+
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_NAME_LEN 16
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: a pointer to the binary data with debug arrays.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_dbg_alloc_user_data(): Allocates user debug data.
+ *
+ * @p_hwfn: HW device data.
+ * @user_data_ptr: (OUT) a pointer to the allocated memory.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
+
+/**
+ * qed_dbg_get_status_str(): Returns a string for the specified status.
+ *
+ * @status: A debug status code.
+ *
+ * Return: A string for the specified status.
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+
+/**
+ * qed_get_idle_chk_results_buf_size(): Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_idle_chk_results(): Prints idle check results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the idle check results.
+ * @num_errors: (OUT) number of errors found in idle check.
+ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
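+
+/* Typical parsing flow (an illustrative sketch only; dump_buf and
+ * num_dumped_dwords are assumed to come from qed_dbg_idle_chk_dump(), and
+ * the other local names are hypothetical):
+ *
+ *   u32 text_size, num_errors, num_warnings;
+ *   char *text;
+ *
+ *   if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *                                         num_dumped_dwords,
+ *                                         &text_size) != DBG_STATUS_OK)
+ *       return;
+ *   text = vzalloc(text_size);
+ *   if (!text)
+ *       return;
+ *   qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ *                              text, &num_errors, &num_warnings);
+ */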
+
+/**
+ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @meta_buf: Meta buffer.
+ *
+ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ */
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
+
+/**
+ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_mcp_trace_results(): Prints MCP Trace results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results and
+ * keeps the MCP Trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data()
+ * should be called to free the meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_bytes: Number of bytes that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf);
+
+/**
+ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
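+
+/* Continuous-parsing lifecycle (an illustrative sketch only; meta_buf,
+ * dump_buf and results_buf are hypothetical, caller-provided buffers): the
+ * meta data is set once, any number of dump buffers may then be parsed, and
+ * the meta data is freed at the end:
+ *
+ *   qed_dbg_mcp_trace_set_meta_data(p_hwfn, meta_buf);
+ *   qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);
+ *   ... repeated for further dump buffers ...
+ *   qed_mcp_trace_free_meta_data(p_hwfn);
+ */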
+
+/**
+ * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_reg_fifo_results(): Prints reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_igu_fifo_results(): Prints IGU fifo results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the IGU fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_protection_override_results_buf_size(): Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_protection_override_results(): Prints protection override
+ * results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the protection override results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_fw_asserts_results(): Prints FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer, starting from the header.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the FW Asserts results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_dbg_parse_attn(): Parses and prints the attention register values in
+ * the specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644
index 000000000..dc93ddea8
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -0,0 +1,2414 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/dcbnl.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_rdma.h"
+#ifdef CONFIG_DCB
+#include <linux/qed/qed_eth_if.h>
+#endif
+
+#define QED_DCBX_MAX_MIB_READ_TRY (100)
+#define QED_ETH_TYPE_DEFAULT (0)
+#define QED_ETH_TYPE_ROCE (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7)
+#define QED_ETH_TYPE_FCOE (0x8906)
+#define QED_TCP_PORT_ISCSI (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get the Traffic Class from the priority traffic class table; each
+ * priority is represented by 4 bits holding its traffic class.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
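+
+/* Worked example (illustrative values only): with prio_tc_tbl = 0x76543210,
+ * priority 0 sits in the top nibble (shift (7 - 0) * 4 = 28), so
+ * QED_DCBX_PRIO2TC(0x76543210, 0) is 7 and QED_DCBX_PRIO2TC(0x76543210, 7)
+ * is 0.
+ */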
+
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI},
+ {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE},
+ {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE},
+ {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH},
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_UDP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+ qed_dcbx_app_update[i].name, p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc);
+ }
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool app_tlv, bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type,
+ enum qed_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+ if (enable)
+ p_data->arr[type].update = UPDATE_DCB;
+ else
+ p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
+
+ if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits))
+ p_data->arr[type].dont_add_vlan0 = true;
+
+ /* QM reconf data */
+ if (app_tlv && p_hwfn->hw_info.personality == personality)
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
+
+ /* Configure dcbx vlan priority in doorbell block for roce EDPM */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ type == DCBX_PROTOCOL_ROCE) {
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
+ }
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool app_tlv, bool enable, u8 prio, u8 tc,
+ enum dcbx_protocol_type type)
+{
+ enum qed_pci_personality personality;
+ enum dcbx_protocol_type id;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = qed_dcbx_app_update[i].personality;
+
+ qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable,
+ prio, tc, type, personality);
+ }
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+ u32 app_prio_bitmap,
+ u16 id, enum dcbx_protocol_type *type, bool ieee)
+{
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_FCOE;
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ROCE;
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ISCSI;
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
+
+/* Parse app TLVs to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl,
+ u32 pri_tc_tbl, int count, u8 dcbx_version)
+{
+ enum dcbx_protocol_type type;
+ bool enable, ieee, eth_tlv;
+ u8 tc, priority_map;
+ u16 protocol_id;
+ int priority;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ eth_tlv = false;
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ priority = ffs(priority_map) - 1;
+ if (priority < 0) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return -EINVAL;
+ }
+
+ tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type, ieee)) {
+ /* ETH always has the enable bit reset, as it gets
+ * vlan information per packet. For other protocols,
+ * the bit should be set according to the dcbx_enabled
+ * indication, but we only get here if there was an
+ * app tlv for the protocol, so dcbx must be enabled.
+ */
+ if (type == DCBX_PROTOCOL_ETH) {
+ enable = false;
+ eth_tlv = true;
+ } else {
+ enable = true;
+ }
+
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true,
+ enable, priority, tc, type);
+ }
+ }
+
+ /* If Eth TLV is not detected, use UFP TC as default TC */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv)
+ p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc;
+
+ /* Update ramrod protocol data and hw_info fields
+ * with default info when corresponding APP TLVs are not detected.
+ * The enable field follows different logic for ethernet: only for
+ * ethernet should dcb be disabled by default, since the information
+ * arrives from the OS (unless an explicit app tlv was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
+ qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable,
+ priority, tc, type);
+ }
+
+ return 0;
+}
+
+/* Parse app TLVs to update TC information in hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int
+qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct qed_dcbx_results data = { 0 };
+ struct dcbx_ets_feature *p_ets;
+ struct qed_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ u8 dcbx_version;
+ int num_entries;
+ int rc = 0;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_version);
+ if (rc)
+ return rc;
+
+ p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = !!dcbx_version;
+
+ qed_dcbx_dp_protocol(p_hwfn, &data);
+
+ memcpy(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct qed_dcbx_results));
+
+ return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_mib_meta_data *p_data,
+ enum qed_mib_read_type type)
+{
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+ int rc = 0;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same, i.e. the MIB was not being updated by the MFW while it was
+ * being read.
+ */
+ do {
+ if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = -EIO;
+ }
+
+ return rc;
+}
+
+static void
+qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_app_prio *p_prio,
+ struct qed_dcbx_results *p_results)
+{
+ u8 val;
+
+ p_prio->roce = QED_DCBX_INVALID_PRIORITY;
+ p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
+ p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
+ p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+ p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+ val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+ p_prio->roce_v2 = val;
+ }
+
+ if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+ p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+ p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+ p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+ p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+ p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
+ p_prio->eth);
+}
+
+static void
+qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct qed_dcbx_params *p_params, bool ieee)
+{
+ struct qed_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+ QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
+ pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ entry->prio = ffs(pri_map) - 1;
+ entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type, ieee);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n",
+ p_params->pfc.willing, pfc_map, p_params->pfc.max_tc,
+ p_params->pfc.enabled);
+}
+
+static void
+qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ __be32 bw_map[2], tsa_map[2];
+ u32 pri_map;
+ int i;
+
+ p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing, p_params->ets_enabled,
+ p_params->ets_cbs, p_ets->pri_tc_tbl[0],
+ p_params->max_ets_tc);
+
+ if (p_params->ets_enabled && !p_params->max_ets_tc) {
+ p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "ETS params: max_ets_tc is forced to %d\n",
+ p_params->max_ets_tc);
+ }
+
+ /* The 8-bit tsa and bw values corresponding to each of the 8 TCs are
+ * encoded in a u32 array of size 2.
+ */
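+ /* For example (illustrative values only): if tc_bw_tbl[0] holds
+ * 0x0a141e28, then after the conversion below ets_tc_bw_tbl[0..3]
+ * become 10, 20, 30 and 40.
+ */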
+ cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2);
+ cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2);
+ pri_map = p_ets->pri_tc_tbl[0];
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
+{
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+ qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params, false);
+ params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *params)
+{
+ struct qed_dcbx_operational_params *p_operational;
+ struct qed_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBX version is non-zero, then negotiation
+ * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n");
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
+
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
+ qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+ err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+ sizeof(p_local->local_chassis_id));
+ memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+ sizeof(p_local->local_port_id));
+}
+
+static void
+qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+ sizeof(p_remote->peer_chassis_id));
+ memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ sizeof(p_remote->peer_port_id));
+}
+
+static int
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params,
+ enum qed_mib_read_type type)
+{
+ switch (type) {
+ case QED_DCBX_REMOTE_MIB:
+ qed_dcbx_get_remote_params(p_hwfn, p_params);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ qed_dcbx_get_local_params(p_hwfn, p_params);
+ break;
+ case QED_DCBX_OPERATIONAL_MIB:
+ qed_dcbx_get_operational_params(p_hwfn, p_params);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_params);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return 0;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+ return 0;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = -EINVAL;
+
+ switch (type) {
+ case QED_DCBX_OPERATIONAL_MIB:
+ rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_REMOTE_MIB:
+ rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return rc;
+}
+
+static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type)
+{
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ void *cookie = hwfn->cdev->ops_cookie;
+
+ if (cookie && op->dcbx_aen)
+ op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type);
+}
+
+/* Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = 0;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
+ if (!rc) {
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ qed_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update(p_hwfn);
+
+ /* for roce PFs, we may want to enable/disable DPM
+ * when DCBx change occurs
+ */
+ if (p_hwfn->hw_info.personality ==
+ QED_PCI_ETH_ROCE)
+ qed_roce_dpm_dcbx(p_hwfn, p_ptt);
+ }
+ }
+
+ qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ struct qed_dcbx_results *p_data;
+ u16 val;
+
+ /* Configure in NIG which protocols support EDPM and should
+ * honor PFC.
+ */
+ p_data = &p_hwfn->p_dcbx_info->results;
+ val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+ (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+ val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+ val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
+ }
+
+ qed_dcbx_aen(p_hwfn, type);
+
+ return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+ if (!p_hwfn->p_dcbx_info)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_dcbx_info);
+ p_hwfn->p_dcbx_info = NULL;
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct qed_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+ p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ u8 update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+ p_dest->update_fcoe_dcb_data_mode = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+ p_dest->update_roce_dcb_data_mode = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+ p_dest->update_rroce_dcb_data_mode = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+ p_dest->update_iscsi_dcb_data_mode = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_mode = update_flag;
+
+ p_dcb_data = &p_dest->fcoe_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+ p_dcb_data = &p_dest->roce_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE);
+ p_dcb_data = &p_dest->rroce_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2);
+ p_dcb_data = &p_dest->iscsi_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+ p_dcb_data = &p_dest->eth_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
+
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri)
+{
+ struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get;
+
+ if (pri >= QED_MAX_PFC_PRIORITIES) {
+ DP_ERR(p_hwfn, "Invalid priority %d\n", pri);
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ if (!dcbx_info->operational.valid) {
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Dcbx parameters not available\n");
+ return QED_DCBX_DEFAULT_TC;
+ }
+
+ return dcbx_info->operational.params.ets_pri_tc_tbl[pri];
+}
+
+#ifdef CONFIG_DCB
+static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *p_get,
+ enum qed_mib_read_type type)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ goto out;
+
+ rc = qed_dcbx_get_params(p_hwfn, p_get, type);
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 *pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ *pfc &= ~DCBX_PFC_ERROR_MASK;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= BIT(i);
+
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ __be32 bw_map[2], tsa_map[2];
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ p_ets->pri_tc_tbl[0] = 0;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ ((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i];
+ ((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i];
+
+ /* Copy the traffic class of each priority to its corresponding
+ * 4 bits in the priority traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
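+
+ /* Illustrative example (values not from the driver): if
+ * ets_pri_tc_tbl[] is {0, 0, 1, 1, 2, 2, 3, 3}, the loop above
+ * produces pri_tc_tbl[0] = 0x00112233, i.e. the layout that
+ * QED_DCBX_PRIO2TC() reads back.
+ */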
+
+ be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2);
+ be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2);
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct qed_dcbx_params *p_params, bool ieee)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
+ if (ieee) {
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ switch (p_params->app_entry[i].sf_ieee) {
+ case QED_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct qed_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ memcpy(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+ qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+ qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+ qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_mib_meta_data data;
+ u32 resp = 0, param = 0;
+ int rc = 0;
+
+ if (!hw_commit) {
+ memcpy(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ /* clear set-params cache */
+ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+ memset(&local_admin, 0, sizeof(local_admin));
+ qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+ return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params)
+{
+ struct qed_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ memcpy(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info)
+ return -ENOMEM;
+
+ rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return rc;
+ }
+
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC);
+ if (!dcbx_info)
+ return NULL;
+
+ if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+ !dcbx_info->operational.enabled) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ return dcbx_info;
+}
+
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ *bw_pct = 0;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
+ u8 *bwg_id, u8 *bw_pct, u8 *up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *prio = *bwg_id = *bw_pct = *up_map = 0;
+}
+
+static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
+ int bwg_id, u8 *bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *bw_pct = 0;
+}
+
+static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
+ int priority, u8 *setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *setting = dcbx_info->operational.params.pfc.prio[priority];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
+ priority, setting);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.prio[priority] = !!setting;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+ DCB_CAP_DCBX_STATIC);
+ break;
+ default:
+ *cap = false;
+ rc = 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
+ kfree(dcbx_info);
+
+ return rc;
+}
+
+static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcbx_info->operational.params.max_ets_tc;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcbx_info->operational.params.pfc.max_tc;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ kfree(dcbx_info);
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.params.pfc.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ u8 mode = 0;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ if (dcbx_info->operational.ieee)
+ mode |= DCB_CAP_DCBX_VER_IEEE;
+ if (dcbx_info->operational.cee)
+ mode |= DCB_CAP_DCBX_VER_CEE;
+ if (dcbx_info->operational.local)
+ mode |= DCB_CAP_DCBX_STATIC;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
+ kfree(dcbx_info);
+
+ return mode;
+}
+
+static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
+ int tc,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB,
+ "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
+ tc, pri_type, pgid, bw_pct, up_map);
+
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
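+/* dcbnl 'setall' hook: commit the locally configured DCBX parameters
+ * (called with hw_commit set).
+ */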
+static u8 qed_dcbnl_setall(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = num;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.max_tc = num;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
+ return -EINVAL;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ return prio;
+}
+
+static int qed_dcbnl_setapp(struct qed_dev *cdev,
+ u8 idtype, u16 idval, u8 pri_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = idval;
+ dcbx_set.config.params.app_entry[i].prio = pri_map;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
+
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) &&
+ !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) {
+ DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n");
+ return 1;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.ver_num = 0;
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ dcbx_set.enabled = true;
+ }
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ dcbx_set.enabled = true;
+ }
+
+ if (mode & DCB_CAP_DCBX_STATIC) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+ dcbx_set.enabled = true;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (dcbx_info->operational.params.ets_enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (dcbx_info->operational.params.pfc.enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (dcbx_info->operational.params.app_valid)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ kfree(dcbx_info);
+ return 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ bool enabled, willing;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
+ featid, flags);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ enabled = !!(flags & DCB_FEATCFG_ENABLE);
+ willing = !!(flags & DCB_FEATCFG_WILLING);
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_enabled = enabled;
+ dcbx_set.config.params.ets_willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = enabled;
+ dcbx_set.config.params.pfc.willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_willing = willing;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ return 1;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ info->willing = dcbx_info->remote.params.app_willing;
+ info->error = dcbx_info->remote.params.app_error;
+ *app_count = dcbx_info->remote.params.num_app_entries;
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+ struct dcb_app *table)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+ if (dcbx_info->remote.params.app_entry[i].ethtype)
+ table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+ else
+ table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+ table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+ table[i].protocol =
+ dcbx_info->remote.params.app_entry[i].proto_id;
+ }
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (dcbx_info->remote.params.pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+ pfc->pfc_en, pfc->tcs_supported);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ pg->willing = dcbx_info->remote.params.ets_willing;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+ pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d\n", pg->willing);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
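+/* Common helper for the IEEE PFC 'get' hooks: reads the PFC parameters from
+ * the operational MIB, or from the remote MIB when 'remote' is set.
+ */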
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+ struct ieee_pfc *pfc, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_params *params;
+ struct qed_dcbx_get *dcbx_info;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ pfc->pfc_cap = params->pfc.max_tc;
+ pfc->pfc_en = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (params->pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
+}
+
+static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
+
+ dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
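+/* Common helper for the IEEE ETS 'get' hooks: reads the ETS parameters from
+ * the operational MIB, or from the remote MIB when 'remote' is set.
+ */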
+static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
+ struct ieee_ets *ets, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_params *params;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ ets->ets_cap = params->max_ets_tc;
+ ets->willing = params->ets_willing;
+ ets->cbs = params->ets_cbs;
+ memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, false);
+}
+
+static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = ets->ets_cap;
+ dcbx_set.config.params.ets_willing = ets->willing;
+ dcbx_set.config.params.ets_cbs = ets->cbs;
+ memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
+ sizeof(ets->prio_tc));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int
+qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, true);
+}
+
+static int
+qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
+}
+
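+/* Map an IEEE 802.1Qaz app selector to its QED selector-field value */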
+static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee)
+{
+ switch (selector) {
+ case IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case IEEE_8021QAZ_APP_SEL_STREAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_DGRAM:
+ *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case IEEE_8021QAZ_APP_SEL_ANY:
+ *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ u8 prio = 0;
+ u8 sf_ieee;
+ int i;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n",
+ app->selector, app->protocol);
+
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->sf_ieee == sf_ieee) &&
+ (entry->proto_id == app->protocol)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
+ app->protocol);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ app->priority = ffs(prio) - 1;
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ u8 sf_ieee;
+ int rc, i;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n",
+ app->selector, app->protocol, app->priority);
+ if (app->priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
+ return -EINVAL;
+ }
+
+ if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) {
+ DP_INFO(cdev, "Invalid selector field value %d\n",
+ app->selector);
+ return -EINVAL;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->sf_ieee == sf_ieee) &&
+ (entry->proto_id == app->protocol))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee;
+ dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
+ dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
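+/* dcbnl callbacks exported via struct qed_eth_dcbnl_ops (see qed_dcbx.h) */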
+const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
+ .getstate = qed_dcbnl_getstate,
+ .setstate = qed_dcbnl_setstate,
+ .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qed_dcbnl_getpfccfg,
+ .setpfccfg = qed_dcbnl_setpfccfg,
+ .getcap = qed_dcbnl_getcap,
+ .getnumtcs = qed_dcbnl_getnumtcs,
+ .getpfcstate = qed_dcbnl_getpfcstate,
+ .getdcbx = qed_dcbnl_getdcbx,
+ .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
+ .setall = qed_dcbnl_setall,
+ .setnumtcs = qed_dcbnl_setnumtcs,
+ .setpfcstate = qed_dcbnl_setpfcstate,
+ .setapp = qed_dcbnl_setapp,
+ .setdcbx = qed_dcbnl_setdcbx,
+ .setfeatcfg = qed_dcbnl_setfeatcfg,
+ .getfeatcfg = qed_dcbnl_getfeatcfg,
+ .getapp = qed_dcbnl_getapp,
+ .peer_getappinfo = qed_dcbnl_peer_getappinfo,
+ .peer_getapptable = qed_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
+ .ieee_getpfc = qed_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qed_dcbnl_ieee_setpfc,
+ .ieee_getets = qed_dcbnl_ieee_getets,
+ .ieee_setets = qed_dcbnl_ieee_setets,
+ .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
+ .ieee_getapp = qed_dcbnl_ieee_getapp,
+ .ieee_setapp = qed_dcbnl_ieee_setapp,
+};
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644
index 000000000..ea839e605
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum qed_mib_read_type {
+ QED_DCBX_OPERATIONAL_MIB,
+ QED_DCBX_REMOTE_MIB,
+ QED_DCBX_LOCAL_MIB,
+ QED_DCBX_REMOTE_LLDP_MIB,
+ QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ u8 update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+ bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
+};
+
+#define QED_DCBX_VERSION_DISABLED 0
+#define QED_DCBX_VERSION_IEEE 1
+#define QED_DCBX_VERSION_CEE 2
+
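+/* Locally requested (admin) DCBX configuration; override_flags selects which
+ * parts of 'config' are applied by qed_dcbx_config_params().
+ */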
+struct qed_dcbx_set {
+#define QED_DCBX_OVERRIDE_STATE BIT(0)
+#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1)
+#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2)
+#define QED_DCBX_OVERRIDE_APP_CFG BIT(3)
+#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4)
+ u32 override_flags;
+ bool enabled;
+ struct qed_dcbx_admin_params config;
+ u32 ver_num;
+};
+
+struct qed_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ char *name;
+ enum qed_pci_personality personality;
+};
+
+struct qed_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_results results;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ struct qed_dcbx_set set;
+ struct qed_dcbx_get get;
+ u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcbx_mib *mib;
+ size_t size;
+ u32 addr;
+};
+
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+
+#ifdef CONFIG_DCB
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params);
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit);
+#endif
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+#define QED_DCBX_DEFAULT_TC 0
+
+u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
new file mode 100644
index 000000000..cdcead614
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -0,0 +1,8712 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+/* Memory groups enum */
+enum mem_groups {
+ MEM_GROUP_PXP_MEM,
+ MEM_GROUP_DMAE_MEM,
+ MEM_GROUP_CM_MEM,
+ MEM_GROUP_QM_MEM,
+ MEM_GROUP_DORQ_MEM,
+ MEM_GROUP_BRB_RAM,
+ MEM_GROUP_BRB_MEM,
+ MEM_GROUP_PRS_MEM,
+ MEM_GROUP_SDM_MEM,
+ MEM_GROUP_PBUF,
+ MEM_GROUP_IOR,
+ MEM_GROUP_RAM,
+ MEM_GROUP_BTB_RAM,
+ MEM_GROUP_RDIF_CTX,
+ MEM_GROUP_TDIF_CTX,
+ MEM_GROUP_CFC_MEM,
+ MEM_GROUP_CONN_CFC_MEM,
+ MEM_GROUP_CAU_PI,
+ MEM_GROUP_CAU_MEM,
+ MEM_GROUP_CAU_MEM_EXT,
+ MEM_GROUP_PXP_ILT,
+ MEM_GROUP_MULD_MEM,
+ MEM_GROUP_BTB_MEM,
+ MEM_GROUP_IGU_MEM,
+ MEM_GROUP_IGU_MSIX,
+ MEM_GROUP_CAU_SB,
+ MEM_GROUP_BMB_RAM,
+ MEM_GROUP_BMB_MEM,
+ MEM_GROUP_TM_MEM,
+ MEM_GROUP_TASK_CFC_MEM,
+ MEM_GROUPS_NUM
+};
+
+/* Memory groups names */
+static const char * const s_mem_group_names[] = {
+ "PXP_MEM",
+ "DMAE_MEM",
+ "CM_MEM",
+ "QM_MEM",
+ "DORQ_MEM",
+ "BRB_RAM",
+ "BRB_MEM",
+ "PRS_MEM",
+ "SDM_MEM",
+ "PBUF",
+ "IOR",
+ "RAM",
+ "BTB_RAM",
+ "RDIF_CTX",
+ "TDIF_CTX",
+ "CFC_MEM",
+ "CONN_CFC_MEM",
+ "CAU_PI",
+ "CAU_MEM",
+ "CAU_MEM_EXT",
+ "PXP_ILT",
+ "MULD_MEM",
+ "BTB_MEM",
+ "IGU_MEM",
+ "IGU_MSIX",
+ "CAU_SB",
+ "BMB_RAM",
+ "BMB_MEM",
+ "TM_MEM",
+ "TASK_CFC_MEM",
+};
+
+/* Idle check conditions */
+
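+/* Each cond<N>() evaluates one idle-check condition over the register values
+ * r[] and immediate operands imm[]; cond_arr[] below maps condition indices
+ * to these handlers.
+ */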
+static u32 cond5(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]);
+}
+
+static u32 cond7(const u32 *r, const u32 *imm)
+{
+ return ((r[0] >> imm[0]) & imm[1]) != imm[2];
+}
+
+static u32 cond6(const u32 *r, const u32 *imm)
+{
+ return (r[0] & imm[0]) != imm[1];
+}
+
+static u32 cond9(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) !=
+ (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5]));
+}
+
+static u32 cond10(const u32 *r, const u32 *imm)
+{
+ return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]);
+}
+
+static u32 cond4(const u32 *r, const u32 *imm)
+{
+ return (r[0] & ~imm[0]) != imm[1];
+}
+
+static u32 cond0(const u32 *r, const u32 *imm)
+{
+ return (r[0] & ~r[1]) != imm[0];
+}
+
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] | imm[0]) != imm[1];
+}
+
+static u32 cond1(const u32 *r, const u32 *imm)
+{
+ return r[0] != imm[0];
+}
+
+static u32 cond11(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] == imm[0];
+}
+
+static u32 cond12(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1] && r[2] > imm[0];
+}
+
+static u32 cond3(const u32 *r, const u32 *imm)
+{
+ return r[0] != r[1];
+}
+
+static u32 cond13(const u32 *r, const u32 *imm)
+{
+ return r[0] & imm[0];
+}
+
+static u32 cond8(const u32 *r, const u32 *imm)
+{
+ return r[0] < (r[1] - imm[0]);
+}
+
+static u32 cond2(const u32 *r, const u32 *imm)
+{
+ return r[0] > imm[0];
+}
+
+/* Array of Idle Check conditions */
+static u32 (*cond_arr[])(const u32 *r, const u32 *imm) = {
+ cond0,
+ cond1,
+ cond2,
+ cond3,
+ cond4,
+ cond5,
+ cond6,
+ cond7,
+ cond8,
+ cond9,
+ cond10,
+ cond11,
+ cond12,
+ cond13,
+ cond14,
+};
+
+#define NUM_PHYS_BLOCKS 84
+
+#define NUM_DBG_RESET_REGS 8
+
+/******************************* Data Types **********************************/
+
+enum hw_types {
+ HW_TYPE_ASIC,
+ PLATFORM_RESERVED,
+ PLATFORM_RESERVED2,
+ PLATFORM_RESERVED3,
+ PLATFORM_RESERVED4,
+ MAX_HW_TYPES
+};
+
+/* CM context types */
+enum cm_ctx_types {
+ CM_CTX_CONN_AG,
+ CM_CTX_CONN_ST,
+ CM_CTX_TASK_AG,
+ CM_CTX_TASK_ST,
+ NUM_CM_CTX_TYPES
+};
+
+/* Debug bus frame modes */
+enum dbg_bus_frame_modes {
+ DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */
+ DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */
+ DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dword, 3 HW dwords */
+ DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */
+ DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */
+ DBG_BUS_NUM_FRAME_MODES
+};
+
+/* Debug bus SEMI frame modes */
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */
+ DBG_BUS_SEMI_NUM_FRAME_MODES
+};
+
+/* Debug bus filter types */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */
+ DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */
+ DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */
+ DBG_BUS_FILTER_TYPE_ON /* Filter always on */
+};
+
+/* Debug bus pre-trigger recording types */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */
+};
+
+/* Debug bus post-trigger recording types */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */
+ DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */
+};
+
+/* Debug bus other engine mode */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
+};
+
+/* DBG block Framing mode definitions */
+struct framing_mode_defs {
+ u8 id;
+ u8 blocks_dword_mask;
+ u8 storms_dword_mask;
+ u8 semi_framing_mode_id;
+ u8 full_buf_thr;
+};
+
+/* Chip constant definitions */
+struct chip_defs {
+ const char *name;
+ u8 dwords_per_cycle;
+ u8 num_framing_modes;
+ u32 num_ilt_pages;
+ struct framing_mode_defs *framing_modes;
+};
+
+/* HW type constant definitions */
+struct hw_type_defs {
+ const char *name;
+ u32 delay_factor;
+ u32 dmae_thresh;
+ u32 log_thresh;
+};
+
+/* RBC reset definitions */
+struct rbc_reset_defs {
+ u32 reset_reg_addr;
+ u32 reset_val[MAX_CHIP_IDS];
+};
+
+/* Storm constant definitions.
+ * Addresses are in bytes, sizes are in quad-regs.
+ */
+struct storm_defs {
+ char letter;
+ enum block_id sem_block_id;
+ enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS];
+ bool has_vfc;
+ u32 sem_fast_mem_addr;
+ u32 sem_frame_mode_addr;
+ u32 sem_slow_enable_addr;
+ u32 sem_slow_mode_addr;
+ u32 sem_slow_mode1_conf_addr;
+ u32 sem_sync_dbg_empty_addr;
+ u32 sem_gpre_vect_addr;
+ u32 cm_ctx_wr_addr;
+ u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES];
+ u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES];
+};
+
+/* Debug Bus Constraint operation constant definitions */
+struct dbg_bus_constraint_op_defs {
+ u8 hw_op_val;
+ bool is_cyclic;
+};
+
+/* Storm Mode definitions */
+struct storm_mode_defs {
+ const char *name;
+ bool is_fast_dbg;
+ u8 id_in_hw;
+ u32 src_disable_reg_addr;
+ u32 src_enable_val;
+ bool exists[MAX_CHIP_IDS];
+};
+
+struct grc_param_defs {
+ u32 default_val[MAX_CHIP_IDS];
+ u32 min;
+ u32 max;
+ bool is_preset;
+ bool is_persistent;
+ u32 exclude_all_preset_val;
+ u32 crash_preset_val[MAX_CHIP_IDS];
+};
+
+/* Address is in 128b units. Width is in bits. */
+struct rss_mem_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 addr;
+ u32 entry_width;
+ u32 num_entries[MAX_CHIP_IDS];
+};
+
+struct vfc_ram_defs {
+ const char *mem_name;
+ const char *type_name;
+ u32 base_row;
+ u32 num_rows;
+};
+
+struct big_ram_defs {
+ const char *instance_name;
+ enum mem_groups mem_group_id;
+ enum mem_groups ram_mem_group_id;
+ enum dbg_grc_params grc_param;
+ u32 addr_reg_addr;
+ u32 data_reg_addr;
+ u32 is_256b_reg_addr;
+ u32 is_256b_bit_offset[MAX_CHIP_IDS];
+ u32 ram_size[MAX_CHIP_IDS]; /* In dwords */
+};
+
+struct phy_defs {
+ const char *phy_name;
+
+ /* PHY base GRC address */
+ u32 base_addr;
+
+ /* Relative address of indirect TBUS address register (bits 0..7) */
+ u32 tbus_addr_lo_addr;
+
+ /* Relative address of indirect TBUS address register (bits 8..10) */
+ u32 tbus_addr_hi_addr;
+
+ /* Relative address of indirect TBUS data register (bits 0..7) */
+ u32 tbus_data_lo_addr;
+
+ /* Relative address of indirect TBUS data register (bits 8..11) */
+ u32 tbus_data_hi_addr;
+};
+
+/* Split type definitions */
+struct split_type_defs {
+ const char *name;
+};
+
+/******************************** Constants **********************************/
+
+#define BYTES_IN_DWORD sizeof(u32)
+/* In the macros below, size and offset are specified in bits */
+#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32)
+#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
+#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
+#define FIELD_DWORD_OFFSET(type, field) \
+ ((int)(FIELD_BIT_OFFSET(type, field) / 32))
+#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
+#define FIELD_BIT_MASK(type, field) \
+ (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
+ FIELD_DWORD_SHIFT(type, field))
+
+#define SET_VAR_FIELD(var, type, field, val) \
+ do { \
+ var[FIELD_DWORD_OFFSET(type, field)] &= \
+ (~FIELD_BIT_MASK(type, field)); \
+ var[FIELD_DWORD_OFFSET(type, field)] |= \
+ (val) << FIELD_DWORD_SHIFT(type, field); \
+ } while (0)
+
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, addr, (arr)[i]); \
+ } while (0)
+
+#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
+#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
+
+/* extra lines include a signature line + optional latency events line */
+#define NUM_EXTRA_DBG_LINES(block) \
+ (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 2 : 1)
+#define NUM_DBG_LINES(block) \
+ ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block))
+
+#define USE_DMAE true
+#define PROTECT_WIDE_BUS true
+
+#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
+#define RAM_LINES_TO_BYTES(lines) \
+ DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines))
+
+#define REG_DUMP_LEN_SHIFT 24
+#define MEM_DUMP_ENTRY_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem))
+
+#define IDLE_CHK_RULE_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule))
+
+#define IDLE_CHK_RESULT_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr))
+
+#define IDLE_CHK_RESULT_REG_HDR_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr))
+
+#define PAGE_MEM_DESC_SIZE_DWORDS \
+ BYTES_TO_DWORDS(sizeof(struct phys_mem_desc))
+
+#define IDLE_CHK_MAX_ENTRIES_SIZE 32
+
+/* The sizes and offsets below are specified in bits */
+#define VFC_CAM_CMD_STRUCT_SIZE 64
+#define VFC_CAM_CMD_ROW_OFFSET 48
+#define VFC_CAM_CMD_ROW_SIZE 9
+#define VFC_CAM_ADDR_STRUCT_SIZE 16
+#define VFC_CAM_ADDR_OP_OFFSET 0
+#define VFC_CAM_ADDR_OP_SIZE 4
+#define VFC_CAM_RESP_STRUCT_SIZE 256
+#define VFC_RAM_ADDR_STRUCT_SIZE 16
+#define VFC_RAM_ADDR_OP_OFFSET 0
+#define VFC_RAM_ADDR_OP_SIZE 2
+#define VFC_RAM_ADDR_ROW_OFFSET 2
+#define VFC_RAM_ADDR_ROW_SIZE 10
+#define VFC_RAM_RESP_STRUCT_SIZE 256
+
+#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE)
+#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE)
+#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE)
+#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS
+#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE)
+#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE)
+
+#define NUM_VFC_RAM_TYPES 4
+
+#define VFC_CAM_NUM_ROWS 512
+
+#define VFC_OPCODE_CAM_RD 14
+#define VFC_OPCODE_RAM_RD 0
+
+#define NUM_RSS_MEM_TYPES 5
+
+#define NUM_BIG_RAM_TYPES 3
+#define BIG_RAM_NAME_LEN 3
+
+#define NUM_PHY_TBUS_ADDRESSES 2048
+#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2)
+
+#define RESET_REG_UNRESET_OFFSET 4
+
+#define STALL_DELAY_MS 500
+
+#define STATIC_DEBUG_LINE_DWORDS 9
+
+#define NUM_COMMON_GLOBAL_PARAMS 10
+
+#define MAX_RECURSION_DEPTH 10
+
+#define FW_IMG_KUKU 0
+#define FW_IMG_MAIN 1
+#define FW_IMG_L2B 2
+
+#define REG_FIFO_ELEMENT_DWORDS 2
+#define REG_FIFO_DEPTH_ELEMENTS 32
+#define REG_FIFO_DEPTH_DWORDS \
+ (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
+
+#define IGU_FIFO_ELEMENT_DWORDS 4
+#define IGU_FIFO_DEPTH_ELEMENTS 64
+#define IGU_FIFO_DEPTH_DWORDS \
+ (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
+
+#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
+#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
+#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
+ (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+
+#define MCP_SPAD_TRACE_OFFSIZE_ADDR \
+ (MCP_REG_SCRATCH + \
+ offsetof(struct static_init, sections[SPAD_SECTION_TRACE]))
+
+#define MAX_SW_PLTAFORM_STR_SIZE 64
+
+#define EMPTY_FW_VERSION_STR "???_???_???_???"
+#define EMPTY_FW_IMAGE_STR "???????????????"
+
+/***************************** Constant Arrays *******************************/
+
+/* DBG block framing mode definitions, in descending preference order */
+static struct framing_mode_defs s_framing_mode_defs[4] = {
+ {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
+ DBG_BUS_SEMI_FRAME_MODE_4FAST,
+ 10},
+ {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
+ 10},
+ {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
+ {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
+};
+
+/* Chip constant definitions array */
+static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
+ {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
+ s_framing_mode_defs},
+ {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
+ s_framing_mode_defs}
+};
+
+/* Storm constant definitions array */
+static struct storm_defs s_storm_defs[] = {
+ /* Tstorm */
+ {'T', BLOCK_TSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
+ true,
+ TSEM_REG_FAST_MEMORY,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
+ TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
+ TCM_REG_CTX_RBC_ACCS,
+ {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
+ TCM_REG_SM_TASK_CTX},
+ {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */
+ },
+
+ /* Mstorm */
+ {'M', BLOCK_MSEM,
+ {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
+ false,
+ MSEM_REG_FAST_MEMORY,
+ MSEM_REG_DBG_FRAME_MODE,
+ MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE,
+ MSEM_REG_DBG_MODE1_CFG,
+ MSEM_REG_SYNC_DBG_EMPTY,
+ MSEM_REG_DBG_GPRE_VECT,
+ MCM_REG_CTX_RBC_ACCS,
+ {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX,
+ MCM_REG_SM_TASK_CTX},
+ {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2} */
+ },
+
+ /* Ustorm */
+ {'U', BLOCK_USEM,
+ {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
+ false,
+ USEM_REG_FAST_MEMORY,
+ USEM_REG_DBG_FRAME_MODE,
+ USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE,
+ USEM_REG_DBG_MODE1_CFG,
+ USEM_REG_SYNC_DBG_EMPTY,
+ USEM_REG_DBG_GPRE_VECT,
+ UCM_REG_CTX_RBC_ACCS,
+ {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX,
+ UCM_REG_SM_TASK_CTX},
+ {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */
+ },
+
+ /* Xstorm */
+ {'X', BLOCK_XSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
+ false,
+ XSEM_REG_FAST_MEMORY,
+ XSEM_REG_DBG_FRAME_MODE,
+ XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE,
+ XSEM_REG_DBG_MODE1_CFG,
+ XSEM_REG_SYNC_DBG_EMPTY,
+ XSEM_REG_DBG_GPRE_VECT,
+ XCM_REG_CTX_RBC_ACCS,
+ {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0},
+ {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */
+ },
+
+ /* Ystorm */
+ {'Y', BLOCK_YSEM,
+ {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
+ false,
+ YSEM_REG_FAST_MEMORY,
+ YSEM_REG_DBG_FRAME_MODE,
+ YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE,
+ YSEM_REG_DBG_MODE1_CFG,
+ YSEM_REG_SYNC_DBG_EMPTY,
+ YSEM_REG_DBG_GPRE_VECT,
+ YCM_REG_CTX_RBC_ACCS,
+ {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX,
+ YCM_REG_SM_TASK_CTX},
+ {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */
+ },
+
+ /* Pstorm */
+ {'P', BLOCK_PSEM,
+ {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
+ true,
+ PSEM_REG_FAST_MEMORY,
+ PSEM_REG_DBG_FRAME_MODE,
+ PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE,
+ PSEM_REG_DBG_MODE1_CFG,
+ PSEM_REG_SYNC_DBG_EMPTY,
+ PSEM_REG_DBG_GPRE_VECT,
+ PCM_REG_CTX_RBC_ACCS,
+ {0, PCM_REG_SM_CON_CTX, 0, 0},
+ {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */
+ },
+};
+
+static struct hw_type_defs s_hw_type_defs[] = {
+ /* HW_TYPE_ASIC */
+ {"asic", 1, 256, 32768},
+ {"reserved", 0, 0, 0},
+ {"reserved2", 0, 0, 0},
+ {"reserved3", 0, 0, 0},
+ {"reserved4", 0, 0, 0}
+};
+
+static struct grc_param_defs s_grc_param_defs[] = {
+ /* DBG_GRC_PARAM_DUMP_TSTORM */
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_MSTORM */
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_USTORM */
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_XSTORM */
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_YSTORM */
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_PSTORM */
+ {{1, 1}, 0, 1, false, false, 1, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_REGS */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_RAM */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_PBUF */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_IOR */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_VFC */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_CM_CTX */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_RSS */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_CAU */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_QM */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_MCP */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_DORQ */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_CFC */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_IGU */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_BRB */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_BTB */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_BMB */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_RESERVED1 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_MULD */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_PRS */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_DMAE */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_TM */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_SDM */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_DIF */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_STATIC */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_UNSTALL */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_RESERVED2 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */
+ {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_EXCLUDE_ALL */
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_CRASH */
+ {{0, 0}, 0, 1, true, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_PARITY_SAFE */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_CM */
+ {{1, 1}, 0, 1, false, false, 0, {1, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_PHY */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_NO_MCP */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_NO_FW_VER */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_RESERVED3 */
+ {{0, 0}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */
+ {{0, 1}, 0, 1, false, false, 0, {0, 1}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUC */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_ILT_CDUT */
+ {{1, 1}, 0, 1, false, false, 0, {0, 0}},
+
+ /* DBG_GRC_PARAM_DUMP_CAU_EXT */
+ {{0, 0}, 0, 1, false, false, 0, {1, 1}}
+};
+
+static struct rss_mem_defs s_rss_mem_defs[] = {
+ {"rss_mem_cid", "rss_cid", 0, 32,
+ {256, 320}},
+
+ {"rss_mem_key_msb", "rss_key", 1024, 256,
+ {128, 208}},
+
+ {"rss_mem_key_lsb", "rss_key", 2048, 64,
+ {128, 208}},
+
+ {"rss_mem_info", "rss_info", 3072, 16,
+ {128, 208}},
+
+ {"rss_mem_ind", "rss_ind", 4096, 16,
+ {16384, 26624}}
+};
+
+static struct vfc_ram_defs s_vfc_ram_defs[] = {
+ {"vfc_ram_tt1", "vfc_ram", 0, 512},
+ {"vfc_ram_mtt2", "vfc_ram", 512, 128},
+ {"vfc_ram_stt2", "vfc_ram", 640, 32},
+ {"vfc_ram_ro_vect", "vfc_ram", 672, 32}
+};
+
+static struct big_ram_defs s_big_ram_defs[] = {
+ {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB,
+ BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 0},
+ {153600, 180224}},
+
+ {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB,
+ BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA,
+ MISC_REG_BLOCK_256B_EN, {0, 1},
+ {92160, 117760}},
+
+ {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB,
+ BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA,
+ MISCS_REG_BLOCK_256B_EN, {0, 0},
+ {36864, 36864}}
+};
+
+static struct rbc_reset_defs s_rbc_reset_defs[] = {
+ {MISCS_REG_RESET_PL_HV,
+ {0x0, 0x400}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_1,
+ {0x4404040, 0x4404040}},
+ {MISC_REG_RESET_PL_PDA_VMAIN_2,
+ {0x7, 0x7c00007}},
+ {MISC_REG_RESET_PL_PDA_VAUX,
+ {0x2, 0x2}},
+};
+
+static struct phy_defs s_phy_defs[] = {
+ {"nw_phy", NWS_REG_NWS_CMU_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+};
+
+static struct split_type_defs s_split_type_defs[] = {
+ /* SPLIT_TYPE_NONE */
+ {"eng"},
+
+ /* SPLIT_TYPE_PORT */
+ {"port"},
+
+ /* SPLIT_TYPE_PF */
+ {"pf"},
+
+ /* SPLIT_TYPE_PORT_PF */
+ {"port"},
+
+ /* SPLIT_TYPE_VF */
+ {"vf"}
+};
+
+/******************************** Variables **********************************/
+
+/* The version of the calling app */
+static u32 s_app_ver;
+
+/**************************** Private Functions ******************************/
+
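+/* Container for compile-time (static) assertions; currently empty */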
+static void qed_static_asserts(void)
+{
+}
+
+/* Reads and returns a single dword from the specified unaligned buffer */
+static u32 qed_read_unaligned_dword(u8 *buf)
+{
+ u32 dword;
+
+ memcpy((u8 *)&dword, buf, sizeof(dword));
+ return dword;
+}
+
+/* Sets the value of the specified GRC param */
+static void qed_grc_set_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ dev_data->grc.param_val[grc_param] = val;
+}
+
+/* Returns the value of the specified GRC param */
+static u32 qed_grc_get_param(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return dev_data->grc.param_val[grc_param];
+}
+
+/* Initializes the GRC parameters */
+static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ if (!dev_data->grc.params_initialized) {
+ qed_dbg_grc_set_params_default(p_hwfn);
+ dev_data->grc.params_initialized = 1;
+ }
+}
+
+/* Sets pointer and size for the specified binary buffer type */
+static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn,
+ enum bin_dbg_buffer_type buf_type,
+ const u32 *ptr, u32 size)
+{
+ struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type];
+
+ buf->ptr = (void *)ptr;
+ buf->size = size;
+}
+
+/* Initializes debug data for the specified device */
+static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 num_pfs = 0, max_pfs_per_port = 0;
+
+ if (dev_data->initialized)
+ return DBG_STATUS_OK;
+
+ if (!s_app_ver)
+ return DBG_STATUS_APP_VERSION_NOT_SET;
+
+ /* Set chip */
+ if (QED_IS_K2(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_K2;
+ dev_data->mode_enable[MODE_K2] = 1;
+ dev_data->num_vfs = MAX_NUM_VFS_K2;
+ num_pfs = MAX_NUM_PFS_K2;
+ max_pfs_per_port = MAX_NUM_PFS_K2 / 2;
+ } else if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ dev_data->chip_id = CHIP_BB;
+ dev_data->mode_enable[MODE_BB] = 1;
+ dev_data->num_vfs = MAX_NUM_VFS_BB;
+ num_pfs = MAX_NUM_PFS_BB;
+ max_pfs_per_port = MAX_NUM_PFS_BB;
+ } else {
+ return DBG_STATUS_UNKNOWN_CHIP;
+ }
+
+ /* Set HW type */
+ dev_data->hw_type = HW_TYPE_ASIC;
+ dev_data->mode_enable[MODE_ASIC] = 1;
+
+ /* Set port mode */
+ switch (p_hwfn->cdev->num_ports_in_engine) {
+ case 1:
+ dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1;
+ break;
+ case 2:
+ dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1;
+ break;
+ case 4:
+ dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1;
+ break;
+ }
+
+ /* Set 100G mode */
+ if (QED_IS_CMT(p_hwfn->cdev))
+ dev_data->mode_enable[MODE_100G] = 1;
+
+ /* Set number of ports */
+ if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] ||
+ dev_data->mode_enable[MODE_100G])
+ dev_data->num_ports = 1;
+ else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2])
+ dev_data->num_ports = 2;
+ else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4])
+ dev_data->num_ports = 4;
+
+ /* Set number of PFs per port */
+ dev_data->num_pfs_per_port = min_t(u32,
+ num_pfs / dev_data->num_ports,
+ max_pfs_per_port);
+
+ /* Initializes the GRC parameters */
+ qed_dbg_grc_init_params(p_hwfn);
+
+ dev_data->use_dmae = true;
+ dev_data->initialized = 1;
+
+ return DBG_STATUS_OK;
+}
+
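+/* Returns a pointer to the dbg_block entry of the specified block */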
+static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block *dbg_block;
+
+ dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr;
+ return dbg_block + block_id;
+}
+
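+/* Returns the per-chip dbg_block_chip entry of the specified block */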
+static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn
+ *p_hwfn,
+ enum block_id
+ block_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (const struct dbg_block_chip *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr +
+ block_id * MAX_CHIP_IDS + dev_data->chip_id;
+}
+
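+/* Returns the per-chip dbg_reset_reg entry of the specified reset register */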
+static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn
+ *p_hwfn,
+ u8 reset_reg_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+
+ return (const struct dbg_reset_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr +
+ reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id;
+}
+
+/* Reads the FW info structure for the specified Storm from the chip,
+ * and writes it to the specified fw_info pointer.
+ */
+static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 storm_id, struct fw_info *fw_info)
+{
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ struct fw_info_location fw_info_location;
+ u32 addr, i, size, *dest;
+
+ memset(&fw_info_location, 0, sizeof(fw_info_location));
+ memset(fw_info, 0, sizeof(*fw_info));
+
+ /* First read the address that points to the fw_info location.
+ * The address is located in the last line of the Storm RAM.
+ */
+ addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM +
+ DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) -
+ sizeof(fw_info_location);
+
+ dest = (u32 *)&fw_info_location;
+ size = BYTES_TO_DWORDS(sizeof(fw_info_location));
+
+ for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+
+ /* Read FW version info from Storm RAM */
+ size = le32_to_cpu(fw_info_location.size);
+ if (!size || size > sizeof(*fw_info))
+ return;
+
+ addr = le32_to_cpu(fw_info_location.grc_addr);
+ dest = (u32 *)fw_info;
+ size = BYTES_TO_DWORDS(size);
+
+ for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
+ dest[i] = qed_rd(p_hwfn, p_ptt, addr);
+}
+
+/* Dumps the specified string to the specified buffer.
+ * Returns the dumped size in bytes.
+ */
+static u32 qed_dump_str(char *dump_buf, bool dump, const char *str)
+{
+ if (dump)
+ strcpy(dump_buf, str);
+
+ return (u32)strlen(str) + 1;
+}
+
+/* Dumps zeros to align the specified buffer to dwords.
+ * Returns the dumped size in bytes.
+ */
+static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset)
+{
+ u8 offset_in_dword, align_size;
+
+ offset_in_dword = (u8)(byte_offset & 0x3);
+ align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0;
+
+ if (dump && align_size)
+ memset(dump_buf, 0, align_size);
+
+ return align_size;
+}
+
+/* Writes the specified string param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_str_param(u32 *dump_buf,
+ bool dump,
+ const char *param_name, const char *param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a string param value */
+ if (dump)
+ *(char_buf + offset) = 1;
+ offset++;
+
+ /* Dump param value */
+ offset += qed_dump_str(char_buf + offset, dump, param_val);
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+
+ return BYTES_TO_DWORDS(offset);
+}
+
+/* Writes the specified numeric param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_num_param(u32 *dump_buf,
+ bool dump, const char *param_name, u32 param_val)
+{
+ char *char_buf = (char *)dump_buf;
+ u32 offset = 0;
+
+ /* Dump param name */
+ offset += qed_dump_str(char_buf + offset, dump, param_name);
+
+ /* Indicate a numeric param value */
+ if (dump)
+ *(char_buf + offset) = 0;
+ offset++;
+
+ /* Align buffer to next dword */
+ offset += qed_dump_align(char_buf + offset, dump, offset);
+
+ /* Dump param value (and change offset from bytes to dwords) */
+ offset = BYTES_TO_DWORDS(offset);
+ if (dump)
+ *(dump_buf + offset) = param_val;
+ offset++;
+
+ return offset;
+}
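
/*
 * Standalone sketch (not part of this patch) of the parameter encoding
 * produced by qed_dump_str_param()/qed_dump_num_param() above:
 *   name\0  type-byte (1 = string, 0 = numeric)  value  ...dword padded
 * String values follow the type byte directly; numeric values are
 * written as a raw dword after padding to the next dword boundary.
 * The example parameter values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t align_to_dword(char *buf, uint32_t byte_off)
{
	uint32_t pad = (byte_off & 3) ? 4 - (byte_off & 3) : 0;

	memset(buf + byte_off, 0, pad);
	return pad;
}

/* Returns the new byte offset */
static uint32_t put_str_param(char *buf, uint32_t off,
			      const char *name, const char *val)
{
	strcpy(buf + off, name);
	off += strlen(name) + 1;
	buf[off++] = 1;			/* string param */
	strcpy(buf + off, val);
	off += strlen(val) + 1;
	return off + align_to_dword(buf, off);
}

static uint32_t put_num_param(char *buf, uint32_t off,
			      const char *name, uint32_t val)
{
	strcpy(buf + off, name);
	off += strlen(name) + 1;
	buf[off++] = 0;			/* numeric param */
	off += align_to_dword(buf, off);
	memcpy(buf + off, &val, sizeof(val));
	return off + sizeof(val);
}

int main(void)
{
	char buf[128];
	uint32_t off = 0;

	off = put_str_param(buf, off, "fw-version", "8_59_1_0");
	off = put_num_param(buf, off, "fw-timestamp", 1234567u);
	printf("encoded %u bytes (%u dwords)\n", off, off / 4);
	return 0;
}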
+
+/* Reads the FW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ char fw_ver_str[16] = EMPTY_FW_VERSION_STR;
+ char fw_img_str[16] = EMPTY_FW_IMAGE_STR;
+ struct fw_info fw_info = { {0}, {0} };
+ u32 offset = 0;
+
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+ /* Read FW info from chip */
+ qed_read_fw_info(p_hwfn, p_ptt, &fw_info);
+
+ /* Create FW version/image strings */
+ if (snprintf(fw_ver_str, sizeof(fw_ver_str),
+ "%d_%d_%d_%d", fw_info.ver.num.major,
+ fw_info.ver.num.minor, fw_info.ver.num.rev,
+ fw_info.ver.num.eng) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid FW version string\n");
+ switch (fw_info.ver.image_id) {
+ case FW_IMG_KUKU:
+ strcpy(fw_img_str, "kuku");
+ break;
+ case FW_IMG_MAIN:
+ strcpy(fw_img_str, "main");
+ break;
+ case FW_IMG_L2B:
+ strcpy(fw_img_str, "l2b");
+ break;
+ default:
+ strcpy(fw_img_str, "unknown");
+ break;
+ }
+ }
+
+ /* Dump FW version, image and timestamp */
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-version", fw_ver_str);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "fw-image", fw_img_str);
+ offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp",
+ le32_to_cpu(fw_info.ver.timestamp));
+
+ return offset;
+}
+
+/* Reads the MFW version and writes it as a param to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ char mfw_ver_str[16] = EMPTY_FW_VERSION_STR;
+
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) {
+ u32 global_section_offsize, global_section_addr, mfw_ver;
+ u32 public_data_addr, global_section_offsize_addr;
+
+ /* Find MCP public data GRC address. Needs to be ORed with
+ * MCP_REG_SCRATCH due to a HW bug.
+ */
+ public_data_addr = qed_rd(p_hwfn,
+ p_ptt,
+ MISC_REG_SHARED_MEM_ADDR) |
+ MCP_REG_SCRATCH;
+
+ /* Find MCP public global section offset */
+ global_section_offsize_addr = public_data_addr +
+ offsetof(struct mcp_public_data,
+ sections) +
+ sizeof(offsize_t) * PUBLIC_GLOBAL;
+ global_section_offsize = qed_rd(p_hwfn, p_ptt,
+ global_section_offsize_addr);
+ global_section_addr =
+ MCP_REG_SCRATCH +
+ (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4;
+
+ /* Read MFW version from MCP public global section */
+ mfw_ver = qed_rd(p_hwfn, p_ptt,
+ global_section_addr +
+ offsetof(struct public_global, mfw_ver));
+
+ /* Dump MFW version param */
+ if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d",
+ (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16),
+ (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid MFW version string\n");
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str);
+}
+
+/* Reads the chip revision from the chip and writes it as a param to the
+ * specified buffer. Returns the dumped size in dwords.
+ */
+static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ char param_str[3] = "??";
+
+ if (dev_data->hw_type == HW_TYPE_ASIC) {
+ u32 chip_rev, chip_metal;
+
+ chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+ chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+
+ param_str[0] = 'a' + (u8)chip_rev;
+ param_str[1] = '0' + (u8)chip_metal;
+ }
+
+ return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str);
+}
+
+/* Writes a section header to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_section_hdr(u32 *dump_buf,
+ bool dump, const char *name, u32 num_params)
+{
+ return qed_dump_num_param(dump_buf, dump, name, num_params);
+}
+
+/* Writes the common global params to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 num_specific_global_params)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+ u8 num_params;
+
+ /* Dump global params section header */
+ num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params +
+ (dev_data->chip_id == CHIP_BB ? 1 : 0);
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "global_params", num_params);
+
+ /* Store params */
+ offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_mfw_ver_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_chip_revision_param(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "tools-version", TOOLS_VERSION);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "chip",
+ s_chip_defs[dev_data->chip_id].name);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "platform",
+ s_hw_type_defs[dev_data->hw_type].name);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "pci-func", p_hwfn->abs_pf_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "epoch", qed_get_epoch_time());
+ if (dev_data->chip_id == CHIP_BB)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "path", QED_PATH_ID(p_hwfn));
+
+ return offset;
+}
+
+/* Writes the "last" section (including CRC) to the specified buffer at the
+ * given offset. Returns the dumped size in dwords.
+ */
+static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump)
+{
+ u32 start_offset = offset;
+
+ /* Dump CRC section header */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0);
+
+ /* Calculate CRC32 and add it to the dword after the "last" section */
+ if (dump)
+ *(dump_buf + offset) = ~crc32(0xffffffff,
+ (u8 *)dump_buf,
+ DWORDS_TO_BYTES(offset));
+
+ offset++;
+
+ return offset - start_offset;
+}
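
/*
 * Standalone sketch (not part of this patch): verifying the trailing
 * CRC written by qed_dump_last_section().  Assumes the kernel crc32()
 * used above is the standard little-endian CRC-32 with no final
 * inversion, so ~crc32(0xffffffff, buf, len) equals the conventional
 * CRC-32, and assumes the "last" section is the final section of the
 * dump, so the CRC is the dump's final dword.
 */
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32_ref(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xffffffffu;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1u) ? 0xedb88320u : 0);
	}
	return ~crc;	/* conventional final inversion */
}

/* dump_buf/len_dwords cover the whole dump including the CRC dword */
static int dump_crc_ok(const uint32_t *dump_buf, uint32_t len_dwords)
{
	uint32_t stored = dump_buf[len_dwords - 1];
	uint32_t calc = crc32_ref((const uint8_t *)dump_buf,
				  (size_t)(len_dwords - 1) * 4);

	return stored == calc;
}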
+
+/* Update blocks reset state */
+static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 rst_reg_id;
+ u32 blk_id;
+
+ /* Read reset registers */
+ for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) {
+ const struct dbg_reset_reg *rst_reg;
+ bool rst_reg_removed;
+ u32 rst_reg_addr;
+
+ rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id);
+ rst_reg_removed = GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_IS_REMOVED);
+ rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data,
+ DBG_RESET_REG_ADDR));
+
+ if (!rst_reg_removed)
+ reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt,
+ rst_reg_addr);
+ }
+
+ /* Check if blocks are in reset */
+ for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) {
+ const struct dbg_block_chip *blk;
+ bool has_rst_reg;
+ bool is_removed;
+
+ blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id);
+ is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_rst_reg = GET_FIELD(blk->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+
+ if (!is_removed && has_rst_reg)
+ dev_data->block_in_reset[blk_id] =
+ !(reg_val[blk->reset_reg_id] &
+ BIT(blk->reset_reg_bit_offset));
+ }
+}
+
+/* is_mode_match recursive function */
+static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn,
+ u16 *modes_buf_offset, u8 rec_depth)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 *dbg_array;
+ bool arg1, arg2;
+ u8 tree_val;
+
+ if (rec_depth > MAX_RECURSION_DEPTH) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n");
+ return false;
+ }
+
+ /* Get next element from modes tree buffer */
+ dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr;
+ tree_val = dbg_array[(*modes_buf_offset)++];
+
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return !qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ case INIT_MODE_OP_OR:
+ case INIT_MODE_OP_AND:
+ arg1 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ arg2 = qed_is_mode_match_rec(p_hwfn,
+ modes_buf_offset, rec_depth + 1);
+ return (tree_val == INIT_MODE_OP_OR) ? (arg1 ||
+ arg2) : (arg1 && arg2);
+ default:
+ return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0;
+ }
+}
+
+/* Returns true if the mode (specified using modes_buf_offset) is enabled */
+static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset)
+{
+ return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0);
+}
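
/*
 * Standalone sketch (not part of this patch) of evaluating a
 * prefix-encoded boolean tree like the modes tree above: an operator
 * byte is followed by its operand sub-trees, and any other byte is a
 * leaf index into an "enabled" array.  Both operands of a binary op
 * are always evaluated so the offset advances past the whole sub-tree.
 * The opcode values and the example tree are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { OP_NOT = 0, OP_OR = 1, OP_AND = 2, NUM_OPS = 3 };

static bool eval_tree(const uint8_t *tree, uint16_t *off,
		      const bool *enabled, int depth)
{
	uint8_t val;
	bool a, b;

	if (depth > 16)		/* guard against a corrupt buffer */
		return false;

	val = tree[(*off)++];
	switch (val) {
	case OP_NOT:
		return !eval_tree(tree, off, enabled, depth + 1);
	case OP_OR:
	case OP_AND:
		a = eval_tree(tree, off, enabled, depth + 1);
		b = eval_tree(tree, off, enabled, depth + 1);
		return val == OP_OR ? (a || b) : (a && b);
	default:
		return enabled[val - NUM_OPS];	/* leaf: mode index */
	}
}

int main(void)
{
	/* Encodes: modeA AND (NOT modeB), leaves 3 -> A, 4 -> B */
	const uint8_t tree[] = { OP_AND, 3, OP_NOT, 4 };
	const bool enabled[] = { true, false };	/* A on, B off */
	uint16_t off = 0;

	printf("match = %d\n", eval_tree(tree, &off, enabled, 0));
	return 0;
}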
+
+/* Enable / disable the Debug block */
+static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 1 : 0);
+}
+
+/* Resets the Debug block */
+static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val;
+ const struct dbg_reset_reg *reset_reg;
+ const struct dbg_block_chip *block;
+
+ block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG);
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id);
+ reset_reg_addr =
+ DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR));
+
+ old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr);
+ new_reset_reg_val =
+ old_reset_reg_val & ~BIT(block->reset_reg_bit_offset);
+
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val);
+ qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val);
+}
+
+/* Enable / disable Debug Bus clients according to the specified mask
+ * (1 = enable, 0 = disable).
+ */
+static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 client_mask)
+{
+ qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask);
+}
+
+static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ u8 line_id,
+ u8 enable_mask,
+ u8 right_shift,
+ u8 force_valid_mask, u8 force_frame_mask)
+{
+ const struct dbg_block_chip *block =
+ qed_get_dbg_block_per_chip(p_hwfn, block_id);
+
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr),
+ line_id);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr),
+ enable_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr),
+ right_shift);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr),
+ force_valid_mask);
+ qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr),
+ force_frame_mask);
+}
+
+/* Disable debug bus in all blocks */
+static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 block_id;
+
+ /* Disable all blocks */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_block_chip *block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)block_id);
+
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED) ||
+ dev_data->block_in_reset[block_id])
+ continue;
+
+ /* Disable debug bus */
+ if (GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS)) {
+ u32 dbg_en_addr =
+ block_per_chip->dbg_dword_enable_reg_addr;
+ u16 modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ bool eval_mode =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ qed_wr(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(dbg_en_addr),
+ 0);
+ }
+ }
+}
+
+/* Returns true if the specified entity (indicated by GRC param) should be
+ * included in the dump, false otherwise.
+ */
+static bool qed_grc_is_included(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param)
+{
+ return qed_grc_get_param(p_hwfn, grc_param) > 0;
+}
+
+/* Returns the storm_id that matches the specified Storm letter,
+ * or MAX_DBG_STORMS if the Storm letter is invalid.
+ */
+static enum dbg_storms qed_get_id_from_letter(char storm_letter)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++)
+ if (s_storm_defs[storm_id].letter == storm_letter)
+ return (enum dbg_storms)storm_id;
+
+ return MAX_DBG_STORMS;
+}
+
+/* Returns true if the specified Storm should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn,
+ enum dbg_storms storm)
+{
+ return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0;
+}
+
+/* Returns true if the specified memory should be included in the dump, false
+ * otherwise.
+ */
+static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, u8 mem_group_id)
+{
+ const struct dbg_block *block;
+ u8 i;
+
+ block = get_dbg_block(p_hwfn, block_id);
+
+ /* If the block is associated with a Storm, check Storm match */
+ if (block->associated_storm_letter) {
+ enum dbg_storms associated_storm_id =
+ qed_get_id_from_letter(block->associated_storm_letter);
+
+ if (associated_storm_id == MAX_DBG_STORMS ||
+ !qed_grc_is_storm_included(p_hwfn, associated_storm_id))
+ return false;
+ }
+
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++) {
+ struct big_ram_defs *big_ram = &s_big_ram_defs[i];
+
+ if (mem_group_id == big_ram->mem_group_id ||
+ mem_group_id == big_ram->ram_mem_group_id)
+ return qed_grc_is_included(p_hwfn, big_ram->grc_param);
+ }
+
+ switch (mem_group_id) {
+ case MEM_GROUP_PXP_ILT:
+ case MEM_GROUP_PXP_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP);
+ case MEM_GROUP_RAM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM);
+ case MEM_GROUP_PBUF:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF);
+ case MEM_GROUP_CAU_MEM:
+ case MEM_GROUP_CAU_SB:
+ case MEM_GROUP_CAU_PI:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU);
+ case MEM_GROUP_CAU_MEM_EXT:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT);
+ case MEM_GROUP_QM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM);
+ case MEM_GROUP_CFC_MEM:
+ case MEM_GROUP_CONN_CFC_MEM:
+ case MEM_GROUP_TASK_CFC_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX);
+ case MEM_GROUP_DORQ_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ);
+ case MEM_GROUP_IGU_MEM:
+ case MEM_GROUP_IGU_MSIX:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU);
+ case MEM_GROUP_MULD_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD);
+ case MEM_GROUP_PRS_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS);
+ case MEM_GROUP_DMAE_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE);
+ case MEM_GROUP_TM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM);
+ case MEM_GROUP_SDM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM);
+ case MEM_GROUP_TDIF_CTX:
+ case MEM_GROUP_RDIF_CTX:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF);
+ case MEM_GROUP_CM_MEM:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM);
+ case MEM_GROUP_IOR:
+ return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR);
+ default:
+ return true;
+ }
+}
+
+/* Stalls or un-stalls all Storms, based on the "stall" argument */
+static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool stall)
+{
+ u32 reg_addr;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
+ SEM_FAST_REG_STALL_0;
+ qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
+ }
+
+ msleep(STALL_DELAY_MS);
+}
+
+/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are
+ * taken out of reset.
+ */
+static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool rbc_only)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 chip_id = dev_data->chip_id;
+ u32 i;
+
+ /* Take RBCs out of reset */
+ for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++)
+ if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id])
+ qed_wr(p_hwfn,
+ p_ptt,
+ s_rbc_reset_defs[i].reset_reg_addr +
+ RESET_REG_UNRESET_OFFSET,
+ s_rbc_reset_defs[i].reset_val[chip_id]);
+
+ if (!rbc_only) {
+ u32 reg_val[NUM_DBG_RESET_REGS] = { 0 };
+ u8 reset_reg_id;
+ u32 block_id;
+
+ /* Fill reset regs values */
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
+ bool is_removed, has_reset_reg, unreset_before_dump;
+ const struct dbg_block_chip *block;
+
+ block = qed_get_dbg_block_per_chip(p_hwfn,
+ (enum block_id)
+ block_id);
+ is_removed =
+ GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED);
+ has_reset_reg =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_HAS_RESET_REG);
+ unreset_before_dump =
+ GET_FIELD(block->flags,
+ DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP);
+
+ if (!is_removed && has_reset_reg && unreset_before_dump)
+ reg_val[block->reset_reg_id] |=
+ BIT(block->reset_reg_bit_offset);
+ }
+
+ /* Write reset registers */
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
+
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD
+ (reset_reg->data, DBG_RESET_REG_IS_REMOVED))
+ continue;
+
+ if (reg_val[reset_reg_id]) {
+ reset_reg_addr =
+ GET_FIELD(reset_reg->data,
+ DBG_RESET_REG_ADDR);
+ qed_wr(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(reset_reg_addr) +
+ RESET_REG_UNRESET_OFFSET,
+ reg_val[reset_reg_id]);
+ }
+ }
+ }
+}
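
/*
 * Standalone sketch (not part of this patch) of the pattern used in the
 * second half of the function above: walk all blocks, OR each block's
 * bit into a per-register mask, then issue a single write per reset
 * register.  Types and the reg_write_t callback are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define NUM_RST_REGS	8

struct blk {
	bool skip;		/* removed / no reset register */
	uint8_t rst_reg;	/* which reset register */
	uint8_t rst_bit;	/* bit within that register */
};

typedef void (*reg_write_t)(void *ctx, uint8_t reg_id, uint32_t val);

static void unreset_blocks(void *ctx, reg_write_t reg_write,
			   const struct blk *blks, unsigned int num_blks)
{
	uint32_t mask[NUM_RST_REGS] = { 0 };
	unsigned int i;

	/* Pass 1: accumulate one mask per reset register */
	for (i = 0; i < num_blks; i++)
		if (!blks[i].skip)
			mask[blks[i].rst_reg] |= 1u << blks[i].rst_bit;

	/* Pass 2: one write per register that has any bit set */
	for (i = 0; i < NUM_RST_REGS; i++)
		if (mask[i])
			reg_write(ctx, i, mask[i]);
}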
+
+/* Returns the attention block data of the specified block */
+static const struct dbg_attn_block_type_data *
+qed_get_block_attn_data(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type)
+{
+ const struct dbg_attn_block *base_attn_block_arr =
+ (const struct dbg_attn_block *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr;
+
+ return &base_attn_block_arr[block_id].per_type_data[attn_type];
+}
+
+/* Returns the attention registers of the specified block */
+static const struct dbg_attn_reg *
+qed_get_block_attn_regs(struct qed_hwfn *p_hwfn,
+ enum block_id block_id, enum dbg_attn_type attn_type,
+ u8 *num_attn_regs)
+{
+ const struct dbg_attn_block_type_data *block_type_data =
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type);
+
+ *num_attn_regs = block_type_data->num_regs;
+
+ return (const struct dbg_attn_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr +
+ block_type_data->regs_offset;
+}
+
+/* For each block, clear the status of all parities */
+static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ const struct dbg_attn_reg *attn_reg_arr;
+ u32 block_id, sts_clr_address;
+ u8 reg_idx, num_attn_regs;
+
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
+ if (dev_data->block_in_reset[block_id])
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+
+ sts_clr_address = reg_data->sts_clr_address;
+ /* If Mode match: clear parity status */
+ if (!eval_mode ||
+ qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ qed_rd(p_hwfn, p_ptt,
+ DWORDS_TO_BYTES(sts_clr_address));
+ }
+ }
+}
+
+/* Finds the specified image in NVRAM and returns its byte offset and size */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes,
+ bool b_can_sleep)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+ int nvm_result;
+
+ /* Call NVRAM get file command */
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att,
+ b_can_sleep);
+
+ /* Check response */
+ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
+ return DBG_STATUS_OK;
+}
+
+/* Reads data from NVRAM */
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes,
+ u32 *ret_buf,
+ bool b_can_sleep)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
+ s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
+ (u32 *)((u8 *)ret_buf + read_offset),
+ b_can_sleep))
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
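
/*
 * Standalone sketch (not part of this patch) of the chunked read loop
 * above: read at most CHUNK bytes per call, advance the offset by the
 * number of bytes actually returned, stop when nothing is left.  The
 * nvm_read_t callback and the CHUNK value are hypothetical stand-ins
 * for the MCP mailbox command and MCP_DRV_NVM_BUF_LEN.
 */
#include <stdint.h>

#define CHUNK	32u	/* hypothetical per-command limit, in bytes */

/* Returns bytes read, or 0 on error */
typedef uint32_t (*nvm_read_t)(void *ctx, uint32_t offset,
			       uint32_t len, uint8_t *dst);

/* Returns 0 on success, -1 on a failed chunk */
static int read_image(void *ctx, nvm_read_t nvm_read,
		      uint32_t base, uint32_t size, uint8_t *dst)
{
	uint32_t done = 0;

	while (done < size) {
		uint32_t want = size - done < CHUNK ? size - done : CHUNK;
		uint32_t got = nvm_read(ctx, base + done, want, dst + done);

		if (!got)
			return -1;
		done += got;
	}
	return 0;
}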
+
+/* Dumps GRC registers section header. Returns the dumped size in dwords.
+ * The following parameters are dumped:
+ * - count: no. of dumped entries
+ * - split_type: split type
+ * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE)
+ * - reg_type_name: register type name (dumped only if reg_type_name != NULL)
+ */
+static u32 qed_grc_dump_regs_hdr(u32 *dump_buf,
+ bool dump,
+ u32 num_reg_entries,
+ enum init_split_types split_type,
+ u8 split_id, const char *reg_type_name)
+{
+ u8 num_params = 2 +
+ (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 1 : 0);
+ u32 offset = 0;
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_regs", num_params);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "count", num_reg_entries);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "split",
+ s_split_type_defs[split_type].name);
+ if (split_type != SPLIT_TYPE_NONE)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "id", split_id);
+ if (reg_type_name)
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "type", reg_type_name);
+
+ return offset;
+}
+
+/* Reads the specified registers into the specified buffer.
+ * The addr and len arguments are specified in dwords.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len)
+{
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i));
+}
+
+/* Dumps the GRC registers in the specified address range.
+ * Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus,
+ enum init_split_types split_type,
+ u8 split_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 port_id = 0, pf_id = 0, vf_id = 0;
+ bool read_using_dmae = false;
+ u32 thresh;
+ u16 fid;
+
+ if (!dump)
+ return len;
+
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ port_id = split_id;
+ break;
+ case SPLIT_TYPE_PF:
+ pf_id = split_id;
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ port_id = split_id / dev_data->num_pfs_per_port;
+ pf_id = port_id + dev_data->num_ports *
+ (split_id % dev_data->num_pfs_per_port);
+ break;
+ case SPLIT_TYPE_VF:
+ vf_id = split_id;
+ break;
+ default:
+ break;
+ }
+
+ /* Try reading using DMAE */
+ if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF &&
+ (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh ||
+ (PROTECT_WIDE_BUS && wide_bus))) {
+ struct qed_dmae_params dmae_params;
+
+ /* Set DMAE params */
+ memset(&dmae_params, 0, sizeof(dmae_params));
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ dmae_params.port_id = port_id;
+ break;
+ case SPLIT_TYPE_PF:
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.src_pfid = pf_id;
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID,
+ 1);
+ SET_FIELD(dmae_params.flags,
+ QED_DMAE_PARAMS_SRC_PF_VALID, 1);
+ dmae_params.port_id = port_id;
+ dmae_params.src_pfid = pf_id;
+ break;
+ default:
+ break;
+ }
+
+ /* Execute DMAE command */
+ read_using_dmae = !qed_dmae_grc2host(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES(addr),
+ (u64)(uintptr_t)(dump_buf),
+ len, &dmae_params);
+ if (!read_using_dmae) {
+ dev_data->use_dmae = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed reading from chip using DMAE, using GRC instead\n");
+ }
+ }
+
+ if (read_using_dmae)
+ goto print_log;
+
+ /* If not read using DMAE, read using GRC */
+
+ /* Set pretend */
+ if (split_type != dev_data->pretend.split_type ||
+ split_id != dev_data->pretend.split_id) {
+ switch (split_type) {
+ case SPLIT_TYPE_PORT:
+ qed_port_pretend(p_hwfn, p_ptt, port_id);
+ break;
+ case SPLIT_TYPE_PF:
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ break;
+ case SPLIT_TYPE_PORT_PF:
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ pf_id);
+ qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid);
+ break;
+ case SPLIT_TYPE_VF:
+ fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1)
+ | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID,
+ vf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, fid);
+ break;
+ default:
+ break;
+ }
+
+ dev_data->pretend.split_type = (u8)split_type;
+ dev_data->pretend.split_id = split_id;
+ }
+
+ /* Read registers using GRC */
+ qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len);
+
+print_log:
+ /* Print log */
+ dev_data->num_regs_read += len;
+ thresh = s_hw_type_defs[dev_data->hw_type].log_thresh;
+ if ((dev_data->num_regs_read / thresh) >
+ ((dev_data->num_regs_read - len) / thresh))
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Dumped %d registers...\n", dev_data->num_regs_read);
+
+ return len;
+}
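
/*
 * Standalone sketch (not part of this patch) of the fast-path /
 * fall-back logic above: try the bulk (DMA-like) reader when it is
 * still trusted and worthwhile, and on failure latch it off and read
 * word by word instead.  The callbacks are hypothetical stand-ins for
 * qed_dmae_grc2host() and qed_rd().
 */
#include <stdbool.h>
#include <stdint.h>

struct rd_state {
	bool fast_ok;		/* cleared after the first failure */
	uint32_t fast_thresh;	/* min length worth the fast path */
};

typedef int (*bulk_read_t)(void *ctx, uint32_t addr, uint32_t *dst,
			   uint32_t len);
typedef uint32_t (*word_read_t)(void *ctx, uint32_t addr);

static void read_range(struct rd_state *st, void *ctx,
		       bulk_read_t bulk, word_read_t word,
		       uint32_t addr, uint32_t *dst, uint32_t len,
		       bool wide_bus)
{
	uint32_t i;

	if (st->fast_ok && (len >= st->fast_thresh || wide_bus)) {
		if (!bulk(ctx, addr, dst, len))
			return;		/* fast path succeeded */
		st->fast_ok = false;	/* don't retry it again */
	}

	for (i = 0; i < len; i++)
		dst[i] = word(ctx, addr + i);
}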
+
+/* Dumps GRC registers sequence header. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf,
+ bool dump, u32 addr, u32 len)
+{
+ if (dump)
+ *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT);
+
+ return 1;
+}
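
/*
 * Standalone sketch (not part of this patch) of the one-dword register
 * entry header written above: the dword address sits in the low bits
 * and the length in dwords above REG_DUMP_LEN_SHIFT.  The shift value
 * of 24 used here is an assumption for illustration only.
 */
#include <stdint.h>

#define SKETCH_LEN_SHIFT	24u
#define SKETCH_ADDR_MASK	((1u << SKETCH_LEN_SHIFT) - 1)

static uint32_t pack_reg_hdr(uint32_t addr_dwords, uint32_t len_dwords)
{
	return addr_dwords | (len_dwords << SKETCH_LEN_SHIFT);
}

static void unpack_reg_hdr(uint32_t hdr, uint32_t *addr, uint32_t *len)
{
	*addr = hdr & SKETCH_ADDR_MASK;
	*len = hdr >> SKETCH_LEN_SHIFT;
}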
+
+/* Dumps GRC registers sequence. Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 addr, u32 len, bool wide_bus,
+ enum init_split_types split_type, u8 split_id)
+{
+ u32 offset = 0;
+
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, len, wide_bus,
+ split_type, split_id);
+
+ return offset;
+}
+
+/* Dumps GRC registers sequence with skip cycle.
+ * Returns the dumped size in dwords.
+ * - addr: start GRC address in dwords
+ * - total_len: total no. of dwords to dump
+ * - read_len: no. consecutive dwords to read
+ * - skip_len: no. of dwords to skip (and fill with zeros)
+ */
+static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 addr,
+ u32 total_len,
+ u32 read_len, u32 skip_len)
+{
+ u32 offset = 0, reg_offset = 0;
+
+ offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len);
+
+ if (!dump)
+ return offset + total_len;
+
+ while (reg_offset < total_len) {
+ u32 curr_len = min_t(u32, read_len, total_len - reg_offset);
+
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, curr_len, false,
+ SPLIT_TYPE_NONE, 0);
+ reg_offset += curr_len;
+ addr += curr_len;
+
+ if (reg_offset < total_len) {
+ curr_len = min_t(u32, skip_len, total_len - skip_len);
+ memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len));
+ offset += curr_len;
+ reg_offset += curr_len;
+ addr += curr_len;
+ }
+ }
+
+ return offset;
+}
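
/*
 * Standalone sketch (not part of this patch) of the read/skip
 * alternation above: copy read_len dwords, emit skip_len zero dwords,
 * repeat until total_len dwords have been produced, so holes in the
 * register file still occupy space in the dump.  Each step is clamped
 * so the output never exceeds total_len.  The rd_t callback is a
 * hypothetical stand-in for the GRC read.
 */
#include <stdint.h>
#include <string.h>

typedef uint32_t (*rd_t)(void *ctx, uint32_t addr);

static uint32_t dump_with_skip(void *ctx, rd_t rd, uint32_t *out,
			       uint32_t addr, uint32_t total_len,
			       uint32_t read_len, uint32_t skip_len)
{
	uint32_t done = 0, i;

	while (done < total_len) {
		uint32_t n = total_len - done < read_len ?
			     total_len - done : read_len;

		for (i = 0; i < n; i++)
			out[done + i] = rd(ctx, addr + i);
		done += n;
		addr += n;

		if (done < total_len) {
			n = total_len - done < skip_len ?
			    total_len - done : skip_len;
			memset(out + done, 0, n * sizeof(*out));
			done += n;
			addr += n;
		}
	}
	return done;	/* dwords produced == total_len */
}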
+
+/* Dumps GRC registers entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct virt_mem_desc input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ enum init_split_types split_type,
+ u8 split_id,
+ bool block_enable[MAX_BLOCK_ID],
+ u32 *num_dumped_reg_entries)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ *num_dumped_reg_entries = 0;
+
+ while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) {
+ const struct dbg_dump_cond_hdr *cond_hdr =
+ (const struct dbg_dump_cond_hdr *)
+ input_regs_arr.ptr + input_offset++;
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode/block */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ if (eval_mode) {
+ modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match || !block_enable[cond_hdr->block_id]) {
+ input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ for (i = 0; i < cond_hdr->data_size; i++, input_offset++) {
+ const struct dbg_dump_reg *reg =
+ (const struct dbg_dump_reg *)
+ input_regs_arr.ptr + input_offset;
+ u32 addr, len;
+ bool wide_bus;
+
+ addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS);
+ len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH);
+ wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ wide_bus,
+ split_type, split_id);
+ (*num_dumped_reg_entries)++;
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC registers entries for a single split type/ID. Returns the dumped
+ * size in dwords.
+ */
+static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct virt_mem_desc input_regs_arr,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ enum init_split_types split_type,
+ u8 split_id, const char *reg_type_name)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ enum init_split_types hdr_split_type = split_type;
+ u32 num_dumped_reg_entries, offset;
+ u8 hdr_split_id = split_id;
+
+ /* In PORT_PF split type, print a port split header */
+ if (split_type == SPLIT_TYPE_PORT_PF) {
+ hdr_split_type = SPLIT_TYPE_PORT;
+ hdr_split_id = split_id / dev_data->num_pfs_per_port;
+ }
+
+ /* Calculate register dump header size (and skip it for now) */
+ offset = qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0,
+ hdr_split_type,
+ hdr_split_id, reg_type_name);
+
+ /* Dump registers */
+ offset += qed_grc_dump_regs_entries(p_hwfn,
+ p_ptt,
+ input_regs_arr,
+ dump_buf + offset,
+ dump,
+ split_type,
+ split_id,
+ block_enable,
+ &num_dumped_reg_entries);
+
+ /* Write register dump header */
+ if (dump && num_dumped_reg_entries > 0)
+ qed_grc_dump_regs_hdr(dump_buf,
+ dump,
+ num_dumped_reg_entries,
+ hdr_split_type,
+ hdr_split_id, reg_type_name);
+
+ return num_dumped_reg_entries > 0 ? offset : 0;
+}
+
+/* Dumps registers according to the input registers array. Returns the dumped
+ * size in dwords.
+ */
+static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ bool block_enable[MAX_BLOCK_ID],
+ const char *reg_type_name)
+{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG];
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0, input_offset = 0;
+
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct virt_mem_desc curr_input_regs_arr;
+ enum init_split_types split_type;
+ u16 split_count = 0;
+ u32 split_data_size;
+ u8 split_id;
+
+ split_hdr =
+ (const struct dbg_dump_split_hdr *)
+ dbg_buf->ptr + input_offset++;
+ split_type =
+ GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_regs_arr.ptr =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr +
+ input_offset;
+ curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size);
+
+ switch (split_type) {
+ case SPLIT_TYPE_NONE:
+ split_count = 1;
+ break;
+ case SPLIT_TYPE_PORT:
+ split_count = dev_data->num_ports;
+ break;
+ case SPLIT_TYPE_PF:
+ case SPLIT_TYPE_PORT_PF:
+ split_count = dev_data->num_ports *
+ dev_data->num_pfs_per_port;
+ break;
+ case SPLIT_TYPE_VF:
+ split_count = dev_data->num_vfs;
+ break;
+ default:
+ return 0;
+ }
+
+ for (split_id = 0; split_id < split_count; split_id++)
+ offset += qed_grc_dump_split_data(p_hwfn, p_ptt,
+ curr_input_regs_arr,
+ dump_buf + offset,
+ dump, block_enable,
+ split_type,
+ split_id,
+ reg_type_name);
+
+ input_offset += split_data_size;
+ }
+
+ /* Cancel pretends (pretend to original PF) */
+ if (dump) {
+ qed_fid_pretend(p_hwfn, p_ptt,
+ FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID,
+ p_hwfn->rel_pf_id));
+ dev_data->pretend.split_type = SPLIT_TYPE_NONE;
+ dev_data->pretend.split_id = 0;
+ }
+
+ return offset;
+}
+
+/* Dump reset registers. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, num_regs = 0;
+ u8 reset_reg_id;
+
+ /* Calculate header size */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "RESET_REGS");
+
+ /* Write reset registers */
+ for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS;
+ reset_reg_id++) {
+ const struct dbg_reset_reg *reset_reg;
+ u32 reset_reg_addr;
+
+ reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id);
+
+ if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED))
+ continue;
+
+ reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ reset_reg_addr,
+ 1, false, SPLIT_TYPE_NONE, 0);
+ num_regs++;
+ }
+
+ /* Write header */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true, num_regs, SPLIT_TYPE_NONE,
+ 0, "RESET_REGS");
+
+ return offset;
+}
+
+/* Dump registers that are modified during GRC Dump and therefore must be
+ * dumped first. Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 block_id, offset = 0, stall_regs_offset;
+ const struct dbg_attn_reg *attn_reg_arr;
+ u8 storm_id, reg_idx, num_attn_regs;
+ u32 num_reg_entries = 0;
+
+ /* Write empty header for attention registers */
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false,
+ 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS");
+
+ /* Write parity registers */
+ for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
+ if (dev_data->block_in_reset[block_id] && dump)
+ continue;
+
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ (enum block_id)block_id,
+ ATTN_TYPE_PARITY,
+ &num_attn_regs);
+
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data =
+ &attn_reg_arr[reg_idx];
+ u16 modes_buf_offset;
+ bool eval_mode;
+ u32 addr;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset =
+ GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (eval_mode &&
+ !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match: read & dump registers */
+ addr = reg_data->mask_address;
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false,
+ SPLIT_TYPE_NONE, 0);
+ addr = GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1, false,
+ SPLIT_TYPE_NONE, 0);
+ num_reg_entries += 2;
+ }
+ }
+
+ /* Overwrite header for attention registers */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf,
+ true,
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "ATTN_REGS");
+
+ /* Write empty header for stall registers */
+ stall_regs_offset = offset;
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ false, 0, SPLIT_TYPE_NONE, 0, "REGS");
+
+ /* Write Storm stall status registers */
+ for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS;
+ storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 addr;
+
+ if (dev_data->block_in_reset[storm->sem_block_id] && dump)
+ continue;
+
+ addr =
+ BYTES_TO_DWORDS(storm->sem_fast_mem_addr +
+ SEM_FAST_REG_STALLED);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1,
+ false, SPLIT_TYPE_NONE, 0);
+ num_reg_entries++;
+ }
+
+ /* Overwrite header for stall registers */
+ if (dump)
+ qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset,
+ true,
+ num_reg_entries,
+ SPLIT_TYPE_NONE, 0, "REGS");
+
+ return offset;
+}
+
+/* Dumps registers that can't be represented in the debug arrays */
+static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, addr;
+
+ offset += qed_grc_dump_regs_hdr(dump_buf,
+ dump, 2, SPLIT_TYPE_NONE, 0, "REGS");
+
+	/* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8th register should be
+ * skipped).
+ */
+ addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO);
+ offset += qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ RDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+ addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO);
+ offset +=
+ qed_grc_dump_reg_entry_skip(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ TDIF_REG_DEBUG_ERROR_INFO_SIZE,
+ 7,
+ 1);
+
+ return offset;
+}
+
+/* Dumps a GRC memory header (section and params). Returns the dumped size in
+ * dwords. The following parameters are dumped:
+ * - name: dumped only if it's not NULL.
+ * - addr: in dwords, dumped only if name is NULL.
+ * - len: in dwords, always dumped.
+ * - width: dumped if it's not zero.
+ * - packed: dumped only if it's not false.
+ * - mem_group: always dumped.
+ * - is_storm: true only if the memory is related to a Storm.
+ * - storm_letter: valid only if is_storm is true.
+ *
+ */
+static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 addr,
+ u32 len,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group, char storm_letter)
+{
+ u8 num_params = 3;
+ u32 offset = 0;
+ char buf[64];
+
+ if (!len)
+ DP_NOTICE(p_hwfn,
+ "Unexpected GRC Dump error: dumped memory size must be non-zero\n");
+
+ if (bit_width)
+ num_params++;
+ if (packed)
+ num_params++;
+
+ /* Dump section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "grc_mem", num_params);
+
+ if (name) {
+ /* Dump name */
+ if (storm_letter) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), name);
+ } else {
+ strcpy(buf, name);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "name", buf);
+ } else {
+ /* Dump address */
+ u32 addr_in_bytes = DWORDS_TO_BYTES(addr);
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "addr", addr_in_bytes);
+ }
+
+ /* Dump len */
+ offset += qed_dump_num_param(dump_buf + offset, dump, "len", len);
+
+ /* Dump bit width */
+ if (bit_width)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "width", bit_width);
+
+ /* Dump packed */
+ if (packed)
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "packed", 1);
+
+ /* Dump reg type */
+ if (storm_letter) {
+ strcpy(buf, "?STORM_");
+ buf[0] = storm_letter;
+ strcpy(buf + strlen(buf), mem_group);
+ } else {
+ strcpy(buf, mem_group);
+ }
+
+ offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf);
+
+ return offset;
+}
+
+/* Dumps a single GRC memory. If name is NULL, the memory is dumped by address.
+ * Returns the dumped size in dwords.
+ * The addr and len arguments are specified in dwords.
+ */
+static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 addr,
+ u32 len,
+ bool wide_bus,
+ u32 bit_width,
+ bool packed,
+ const char *mem_group, char storm_letter)
+{
+ u32 offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ addr,
+ len,
+ bit_width,
+ packed, mem_group, storm_letter);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, addr, len, wide_bus,
+ SPLIT_TYPE_NONE, 0);
+
+ return offset;
+}
+
+/* Dumps GRC memories entries. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct virt_mem_desc input_mems_arr,
+ u32 *dump_buf, bool dump)
+{
+ u32 i, offset = 0, input_offset = 0;
+ bool mode_match = true;
+
+ while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) {
+ const struct dbg_dump_cond_hdr *cond_hdr;
+ u16 modes_buf_offset;
+ u32 num_entries;
+ bool eval_mode;
+
+ cond_hdr =
+ (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr +
+ input_offset++;
+ num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS;
+
+ /* Check required mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ if (eval_mode) {
+ modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match) {
+ input_offset += cond_hdr->data_size;
+ continue;
+ }
+
+ for (i = 0; i < num_entries;
+ i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) {
+ const struct dbg_dump_mem *mem =
+ (const struct dbg_dump_mem *)((u32 *)
+ input_mems_arr.ptr
+ + input_offset);
+ const struct dbg_block *block;
+ char storm_letter = 0;
+ u32 mem_addr, mem_len;
+ bool mem_wide_bus;
+ u8 mem_group_id;
+
+ mem_group_id = GET_FIELD(mem->dword0,
+ DBG_DUMP_MEM_MEM_GROUP_ID);
+ if (mem_group_id >= MEM_GROUPS_NUM) {
+ DP_NOTICE(p_hwfn, "Invalid mem_group_id\n");
+ return 0;
+ }
+
+ if (!qed_grc_is_mem_included(p_hwfn,
+ (enum block_id)
+ cond_hdr->block_id,
+ mem_group_id))
+ continue;
+
+ mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS);
+ mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH);
+ mem_wide_bus = GET_FIELD(mem->dword1,
+ DBG_DUMP_MEM_WIDE_BUS);
+
+ block = get_dbg_block(p_hwfn,
+ cond_hdr->block_id);
+
+ /* If memory is associated with Storm,
+ * update storm details
+ */
+ if (block->associated_storm_letter)
+ storm_letter = block->associated_storm_letter;
+
+ /* Dump memory */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ mem_addr,
+ mem_len,
+ mem_wide_bus,
+ 0,
+ false,
+ s_mem_group_names[mem_group_id],
+ storm_letter);
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC memories according to the input array dump_mem.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM];
+ u32 offset = 0, input_offset = 0;
+
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
+ const struct dbg_dump_split_hdr *split_hdr;
+ struct virt_mem_desc curr_input_mems_arr;
+ enum init_split_types split_type;
+ u32 split_data_size;
+
+ split_hdr =
+ (const struct dbg_dump_split_hdr *)dbg_buf->ptr +
+ input_offset++;
+ split_type = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID);
+ split_data_size = GET_FIELD(split_hdr->hdr,
+ DBG_DUMP_SPLIT_HDR_DATA_SIZE);
+ curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset;
+ curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size);
+
+ if (split_type == SPLIT_TYPE_NONE)
+ offset += qed_grc_dump_mem_entries(p_hwfn,
+ p_ptt,
+ curr_input_mems_arr,
+ dump_buf + offset,
+ dump);
+ else
+ DP_NOTICE(p_hwfn,
+ "Dumping split memories is currently not supported\n");
+
+ input_offset += split_data_size;
+ }
+
+ return offset;
+}
+
+/* Dumps GRC context data for the specified Storm.
+ * Returns the dumped size in dwords.
+ * The per-LID size is taken in quad-regs and converted to dwords.
+ */
+static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ const char *name,
+ u32 num_lids,
+ enum cm_ctx_types ctx_type, u8 storm_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 i, lid, lid_size, total_size;
+ u32 rd_reg_addr, offset = 0;
+
+ /* Convert quad-regs to dwords */
+ lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4;
+
+ if (!lid_size)
+ return 0;
+
+ total_size = num_lids * lid_size;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ name,
+ 0,
+ total_size,
+ lid_size * 32,
+ false, name, storm->letter);
+
+ if (!dump)
+ return offset + total_size;
+
+ rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]);
+
+ /* Dump context data */
+ for (lid = 0; lid < num_lids; lid++) {
+ for (i = 0; i < lid_size; i++) {
+ qed_wr(p_hwfn,
+ p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rd_reg_addr,
+ 1,
+ false,
+ SPLIT_TYPE_NONE, 0);
+ }
+ }
+
+ return offset;
+}
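
/*
 * Standalone sketch (not part of this patch) of the indirect context
 * read above: select (offset-within-LID << 9) | lid via the write
 * register, then read one dword from the data register.  The 9-bit LID
 * field width is taken from the code above; the register callbacks are
 * hypothetical stand-ins for qed_wr()/qed_rd().
 */
#include <stdint.h>

typedef void (*ctx_wr_t)(void *ctx, uint32_t addr, uint32_t val);
typedef uint32_t (*ctx_rd_t)(void *ctx, uint32_t addr);

static uint32_t dump_ctx(void *ctx, ctx_wr_t wr, ctx_rd_t rd,
			 uint32_t sel_reg, uint32_t data_reg,
			 uint32_t num_lids, uint32_t lid_size_dwords,
			 uint32_t *out)
{
	uint32_t lid, i, n = 0;

	for (lid = 0; lid < num_lids; lid++) {
		for (i = 0; i < lid_size_dwords; i++) {
			wr(ctx, sel_reg, (i << 9) | lid);
			out[n++] = rd(ctx, data_reg);
		}
	}
	return n;	/* dwords dumped */
}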
+
+/* Dumps GRC contexts. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id))
+ continue;
+
+ /* Dump Conn AG context size */
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_AG_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_AG, storm_id);
+
+ /* Dump Conn ST context size */
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "CONN_ST_CTX",
+ NUM_OF_LCIDS,
+ CM_CTX_CONN_ST, storm_id);
+
+ /* Dump Task AG context size */
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_AG_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_AG, storm_id);
+
+ /* Dump Task ST context size */
+ offset += qed_grc_dump_ctx_data(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ "TASK_ST_CTX",
+ NUM_OF_LTIDS,
+ CM_CTX_TASK_ST, storm_id);
+ }
+
+ return offset;
+}
+
+#define VFC_STATUS_RESP_READY_BIT 0
+#define VFC_STATUS_BUSY_BIT 1
+#define VFC_STATUS_SENDING_CMD_BIT 2
+
+#define VFC_POLLING_DELAY_MS 1
+#define VFC_POLLING_COUNT 20
+
+/* Reads data from VFC. Returns the number of dwords read (0 on error).
+ * Sizes are specified in dwords.
+ */
+static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct storm_defs *storm,
+ u32 *cmd_data,
+ u32 cmd_size,
+ u32 *addr_data,
+ u32 addr_size,
+ u32 resp_size, u32 *dump_buf)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 vfc_status, polling_ms, polling_count = 0, i;
+ u32 reg_addr, sem_base;
+ bool is_ready = false;
+
+ sem_base = storm->sem_fast_mem_addr;
+ polling_ms = VFC_POLLING_DELAY_MS *
+ s_hw_type_defs[dev_data->hw_type].delay_factor;
+
+ /* Write VFC command */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_DATA_WR,
+ cmd_data, cmd_size);
+
+ /* Write VFC address */
+ ARR_REG_WR(p_hwfn,
+ p_ptt,
+ sem_base + SEM_FAST_REG_VFC_ADDR,
+ addr_data, addr_size);
+
+ /* Read response */
+ for (i = 0; i < resp_size; i++) {
+ /* Poll until ready */
+ do {
+ reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ &vfc_status,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1,
+ false, SPLIT_TYPE_NONE, 0);
+ is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT);
+
+ if (!is_ready) {
+ if (polling_count++ == VFC_POLLING_COUNT)
+ return 0;
+
+ msleep(polling_ms);
+ }
+ } while (!is_ready);
+
+ reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD;
+ qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + i,
+ true,
+ BYTES_TO_DWORDS(reg_addr),
+ 1, false, SPLIT_TYPE_NONE, 0);
+ }
+
+ return resp_size;
+}
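
/*
 * Standalone sketch (not part of this patch) of the bounded polling
 * above: poll a ready predicate up to max_polls times, sleeping
 * delay_ms between attempts, and report failure if it never becomes
 * ready.  The callbacks are hypothetical stand-ins for the VFC status
 * read and msleep().
 */
#include <stdbool.h>
#include <stdint.h>

typedef bool (*ready_t)(void *ctx);
typedef void (*sleep_ms_t)(uint32_t ms);

/* Returns true if the predicate became true within the budget */
static bool poll_ready(void *ctx, ready_t ready, sleep_ms_t sleep_ms,
		       uint32_t max_polls, uint32_t delay_ms)
{
	uint32_t polls = 0;

	while (!ready(ctx)) {
		if (polls++ == max_polls)
			return false;
		sleep_ms(delay_ms);
	}
	return true;
}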
+
+/* Dump VFC CAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 storm_id)
+{
+ u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 };
+ u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+ u32 row, offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ "vfc_cam",
+ 0,
+ total_size,
+ 256,
+ false, "vfc_cam", storm->letter);
+
+ if (!dump)
+ return offset + total_size;
+
+ /* Prepare CAM address */
+ SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD);
+
+ /* Read VFC CAM data */
+ for (row = 0; row < VFC_CAM_NUM_ROWS; row++) {
+ SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ cam_cmd,
+ VFC_CAM_CMD_DWORDS,
+ cam_addr,
+ VFC_CAM_ADDR_DWORDS,
+ VFC_CAM_RESP_DWORDS,
+ dump_buf + offset);
+ }
+
+ return offset;
+}
+
+/* Dump VFC RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u8 storm_id, struct vfc_ram_defs *ram_defs)
+{
+ u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 };
+ u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 };
+ u32 row, offset = 0;
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ ram_defs->mem_name,
+ 0,
+ total_size,
+ 256,
+ false,
+ ram_defs->type_name,
+ storm->letter);
+
+ if (!dump)
+ return offset + total_size;
+
+ /* Prepare RAM address */
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD);
+
+ /* Read VFC RAM data */
+ for (row = ram_defs->base_row;
+ row < ram_defs->base_row + ram_defs->num_rows; row++) {
+ SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row);
+ offset += qed_grc_dump_read_from_vfc(p_hwfn,
+ p_ptt,
+ storm,
+ ram_cmd,
+ VFC_RAM_CMD_DWORDS,
+ ram_addr,
+ VFC_RAM_ADDR_DWORDS,
+ VFC_RAM_RESP_DWORDS,
+ dump_buf + offset);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC VFC data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u8 storm_id, i;
+ u32 offset = 0;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ if (!qed_grc_is_storm_included(p_hwfn,
+ (enum dbg_storms)storm_id) ||
+ !s_storm_defs[storm_id].has_vfc)
+ continue;
+
+ /* Read CAM */
+ offset += qed_grc_dump_vfc_cam(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, storm_id);
+
+ /* Read RAM */
+ for (i = 0; i < NUM_VFC_RAM_TYPES; i++)
+ offset += qed_grc_dump_vfc_ram(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ storm_id,
+ &s_vfc_ram_defs[i]);
+ }
+
+ return offset;
+}
+
+/* Dumps GRC RSS data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 offset = 0;
+ u8 rss_mem_id;
+
+ for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) {
+ u32 rss_addr, num_entries, total_dwords;
+ struct rss_mem_defs *rss_defs;
+ u32 addr, num_dwords_to_read;
+ bool packed;
+
+ rss_defs = &s_rss_mem_defs[rss_mem_id];
+ rss_addr = rss_defs->addr;
+ num_entries = rss_defs->num_entries[dev_data->chip_id];
+ total_dwords = (num_entries * rss_defs->entry_width) / 32;
+ packed = (rss_defs->entry_width == 16);
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ rss_defs->mem_name,
+ 0,
+ total_dwords,
+ rss_defs->entry_width,
+ packed,
+ rss_defs->type_name, 0);
+
+ /* Dump RSS data */
+ if (!dump) {
+ offset += total_dwords;
+ continue;
+ }
+
+ addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA);
+ while (total_dwords) {
+ num_dwords_to_read = min_t(u32,
+ RSS_REG_RSS_RAM_DATA_SIZE,
+ total_dwords);
+ qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ num_dwords_to_read,
+ false,
+ SPLIT_TYPE_NONE, 0);
+ total_dwords -= num_dwords_to_read;
+ rss_addr++;
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps GRC Big RAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump, u8 big_ram_id)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 block_size, ram_size, offset = 0, reg_val, i;
+ char mem_name[12] = "???_BIG_RAM";
+ char type_name[8] = "???_RAM";
+ struct big_ram_defs *big_ram;
+
+ big_ram = &s_big_ram_defs[big_ram_id];
+ ram_size = big_ram->ram_size[dev_data->chip_id];
+
+ reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr);
+ block_size = reg_val &
+ BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256
+ : 128;
+
+ strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
+ strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN);
+
+ /* Dump memory header */
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ ram_size,
+ block_size * 8,
+ false, type_name, 0);
+
+ /* Read and dump Big RAM data */
+ if (!dump)
+ return offset + ram_size;
+
+ /* Dump Big RAM */
+ for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE);
+ i++) {
+ u32 addr, len;
+
+ qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i);
+ addr = BYTES_TO_DWORDS(big_ram->data_reg_addr);
+ len = BRB_REG_BIG_RAM_DATA_SIZE;
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ false, SPLIT_TYPE_NONE, 0);
+ }
+
+ return offset;
+}
+
+/* Dumps MCP data (scratchpad, cpu_reg_file and MCP registers).
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ bool block_enable[MAX_BLOCK_ID] = { 0 };
+ u32 offset = 0, addr;
+ bool halted = false;
+
+ /* Halt MCP */
+ if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Dump MCP scratchpad */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ BYTES_TO_DWORDS(MCP_REG_SCRATCH),
+ MCP_REG_SCRATCH_SIZE,
+ false, 0, false, "MCP", 0);
+
+ /* Dump MCP cpu_reg_file */
+ offset += qed_grc_dump_mem(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ NULL,
+ BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE),
+ MCP_REG_CPU_REG_FILE_SIZE,
+ false, 0, false, "MCP", 0);
+
+ /* Dump MCP registers */
+ block_enable[BLOCK_MCP] = true;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, block_enable, "MCP");
+
+ /* Dump required non-MCP registers */
+ offset += qed_grc_dump_regs_hdr(dump_buf + offset,
+ dump, 1, SPLIT_TYPE_NONE, 0,
+ "MCP");
+ addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR);
+ offset += qed_grc_dump_reg_entry(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ 1,
+ false, SPLIT_TYPE_NONE, 0);
+
+ /* Release MCP */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+ return offset;
+}
+
+/* Dumps the tbus indirect memory for all PHYs.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ u32 offset = 0, tbus_lo_offset, tbus_hi_offset;
+ char mem_name[32];
+ u8 phy_id;
+
+ for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) {
+ u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr;
+ struct phy_defs *phy_defs;
+ u8 *bytes_buf;
+
+ phy_defs = &s_phy_defs[phy_id];
+ addr_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_lo_addr;
+ addr_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_addr_hi_addr;
+ data_lo_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_lo_addr;
+ data_hi_addr = phy_defs->base_addr +
+ phy_defs->tbus_data_hi_addr;
+
+ if (snprintf(mem_name, sizeof(mem_name), "tbus_%s",
+ phy_defs->phy_name) < 0)
+ DP_NOTICE(p_hwfn,
+ "Unexpected debug error: invalid PHY memory name\n");
+
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ mem_name,
+ 0,
+ PHY_DUMP_SIZE_DWORDS,
+ 16, true, mem_name, 0);
+
+ if (!dump) {
+ offset += PHY_DUMP_SIZE_DWORDS;
+ continue;
+ }
+
+ bytes_buf = (u8 *)(dump_buf + offset);
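+ /* Walk all tbus addresses: the address is written in high/low parts,
+ * and for each address one low data byte and one high data byte are
+ * read into the dump buffer.
+ */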
+ for (tbus_hi_offset = 0;
+ tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8);
+ tbus_hi_offset++) {
+ qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset);
+ for (tbus_lo_offset = 0; tbus_lo_offset < 256;
+ tbus_lo_offset++) {
+ qed_wr(p_hwfn,
+ p_ptt, addr_lo_addr, tbus_lo_offset);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_lo_addr);
+ *(bytes_buf++) = (u8)qed_rd(p_hwfn,
+ p_ptt,
+ data_hi_addr);
+ }
+ }
+
+ offset += PHY_DUMP_SIZE_DWORDS;
+ }
+
+ return offset;
+}
+
+/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0;
+ u32 hw_dump_size_dwords = 0, offset = 0;
+ enum dbg_status status;
+
+ /* Read HW dump image from NVRAM */
+ status = qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ NVM_TYPE_HW_DUMP_OUT,
+ &hw_dump_offset_bytes,
+ &hw_dump_size_bytes,
+ false);
+ if (status != DBG_STATUS_OK)
+ return 0;
+
+ hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes);
+
+ /* Dump HW dump image section */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_hw_dump", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", hw_dump_size_dwords);
+
+ /* Read MCP HW dump image into dump buffer */
+ if (dump && hw_dump_size_dwords) {
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ hw_dump_offset_bytes,
+ hw_dump_size_bytes,
+ dump_buf + offset,
+ false);
+ if (status != DBG_STATUS_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to read MCP HW Dump image from NVRAM\n");
+ return 0;
+ }
+ }
+ offset += hw_dump_size_dwords;
+
+ return offset;
+}
+
+/* Dumps Static Debug data. Returns the dumped size in dwords. */
+static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 block_id, line_id, offset = 0, addr, len;
+
+ /* Don't dump static debug if a debug bus recording is in progress */
+ if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON))
+ return 0;
+
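+ /* Configure the debug block: reset it, route its output to the
+ * internal buffer and enable it, so that each block's static debug
+ * lines can be read back below.
+ */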
+ if (dump) {
+ /* Disable debug bus in all blocks */
+ qed_bus_disable_blocks(p_hwfn, p_ptt);
+
+ qed_bus_reset_dbg_block(p_hwfn, p_ptt);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW);
+ qed_wr(p_hwfn,
+ p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF);
+ qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1);
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, true);
+ }
+
+ /* Dump all static debug lines for each relevant block */
+ for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) {
+ const struct dbg_block_chip *block_per_chip;
+ const struct dbg_block *block;
+ bool is_removed, has_dbg_bus;
+ u16 modes_buf_offset;
+ u32 block_dwords;
+
+ block_per_chip =
+ qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id);
+ is_removed = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_IS_REMOVED);
+ has_dbg_bus = GET_FIELD(block_per_chip->flags,
+ DBG_BLOCK_CHIP_HAS_DBG_BUS);
+
+ if (!is_removed && has_dbg_bus &&
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0) {
+ modes_buf_offset =
+ GET_FIELD(block_per_chip->dbg_bus_mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (!qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ has_dbg_bus = false;
+ }
+
+ if (is_removed || !has_dbg_bus)
+ continue;
+
+ block_dwords = NUM_DBG_LINES(block_per_chip) *
+ STATIC_DEBUG_LINE_DWORDS;
+
+ /* Dump static section params */
+ block = get_dbg_block(p_hwfn, (enum block_id)block_id);
+ offset += qed_grc_dump_mem_hdr(p_hwfn,
+ dump_buf + offset,
+ dump,
+ block->name,
+ 0,
+ block_dwords,
+ 32, false, "STATIC", 0);
+
+ if (!dump) {
+ offset += block_dwords;
+ continue;
+ }
+
+ /* If all lines are invalid - dump zeros */
+ if (dev_data->block_in_reset[block_id]) {
+ memset(dump_buf + offset, 0,
+ DWORDS_TO_BYTES(block_dwords));
+ offset += block_dwords;
+ continue;
+ }
+
+ /* Enable block's client */
+ qed_bus_enable_clients(p_hwfn,
+ p_ptt,
+ BIT(block_per_chip->dbg_client_id));
+
+ addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA);
+ len = STATIC_DEBUG_LINE_DWORDS;
+ for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip);
+ line_id++) {
+ /* Configure debug line ID */
+ qed_bus_config_dbg_line(p_hwfn,
+ p_ptt,
+ (enum block_id)block_id,
+ (u8)line_id, 0xf, 0, 0, 0);
+
+ /* Read debug line info */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ len,
+ true, SPLIT_TYPE_NONE,
+ 0);
+ }
+
+ /* Disable block's client and debug output */
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ qed_bus_config_dbg_line(p_hwfn, p_ptt,
+ (enum block_id)block_id, 0, 0, 0, 0, 0);
+ }
+
+ if (dump) {
+ qed_bus_enable_dbg_block(p_hwfn, p_ptt, false);
+ qed_bus_enable_clients(p_hwfn, p_ptt, 0);
+ }
+
+ return offset;
+}
+
+/* Performs GRC Dump to the specified buffer.
+ * Returns the dump status; the dumped size in dwords is returned via
+ * num_dumped_dwords.
+ */
+static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ bool parities_masked = false;
+ u32 dwords_read, offset = 0;
+ u8 i;
+
+ *num_dumped_dwords = 0;
+ dev_data->num_regs_read = 0;
+
+ /* Update reset state */
+ if (dump)
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 4);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "grc-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-lcids",
+ NUM_OF_LCIDS);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-ltids",
+ NUM_OF_LTIDS);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "num-ports", dev_data->num_ports);
+
+ /* Dump reset registers (dumped before taking blocks out of reset) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_reset_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Take all blocks out of reset (using reset registers) */
+ if (dump) {
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, false);
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+ }
+
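+ /* Parities are masked (when MCP access is allowed) for the duration
+ * of the dump; they are re-enabled and the parity status is cleared
+ * near the end of this function.
+ */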
+ /* Disable all parities using MFW command */
+ if (dump &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) {
+ parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1);
+ if (!parities_masked) {
+ DP_NOTICE(p_hwfn,
+ "Failed to mask parities using MFW\n");
+ if (qed_grc_get_param
+ (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE))
+ return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY;
+ }
+ }
+
+ /* Dump modified registers (dumped before modifying them) */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS))
+ offset += qed_grc_dump_modified_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Stall storms */
+ if (dump &&
+ (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_IOR) ||
+ qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)))
+ qed_grc_stall_storms(p_hwfn, p_ptt, true);
+
+ /* Dump all regs */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) {
+ bool block_enable[MAX_BLOCK_ID];
+
+ /* Dump all blocks except MCP */
+ for (i = 0; i < MAX_BLOCK_ID; i++)
+ block_enable[i] = true;
+ block_enable[BLOCK_MCP] = false;
+ offset += qed_grc_dump_registers(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ block_enable, NULL);
+
+ /* Dump special registers */
+ offset += qed_grc_dump_special_regs(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+ }
+
+ /* Dump memories */
+ offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump);
+
+ /* Dump MCP */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP))
+ offset += qed_grc_dump_mcp(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump context */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX))
+ offset += qed_grc_dump_ctx(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump RSS memories */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS))
+ offset += qed_grc_dump_rss(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump Big RAM */
+ for (i = 0; i < NUM_BIG_RAM_TYPES; i++)
+ if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param))
+ offset += qed_grc_dump_big_ram(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump, i);
+
+ /* Dump VFC */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) {
+ dwords_read = qed_grc_dump_vfc(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+ offset += dwords_read;
+ if (!dwords_read)
+ return DBG_STATUS_VFC_READ_ERROR;
+ }
+
+ /* Dump PHY tbus */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id ==
+ CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC)
+ offset += qed_grc_dump_phy(p_hwfn,
+ p_ptt, dump_buf + offset, dump);
+
+ /* Dump MCP HW Dump */
+ if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) &&
+ !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP))
+ offset += qed_grc_dump_mcp_hw_dump(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Dump static debug data (only if not during debug bus recording) */
+ if (qed_grc_is_included(p_hwfn,
+ DBG_GRC_PARAM_DUMP_STATIC) &&
+ (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE))
+ offset += qed_grc_dump_static_debug(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ if (dump) {
+ /* Unstall storms */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL))
+ qed_grc_stall_storms(p_hwfn, p_ptt, false);
+
+ /* Clear parity status */
+ qed_grc_clear_all_prty(p_hwfn, p_ptt);
+
+ /* Enable all parities using MFW command */
+ if (parities_masked)
+ qed_mcp_mask_parities(p_hwfn, p_ptt, 0);
+ }
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Writes the specified failing Idle Check rule to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u16 rule_id,
+ const struct dbg_idle_chk_rule *rule,
+ u16 fail_entry_id, u32 *cond_reg_values)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_info_reg *info_regs;
+ u32 i, next_reg_offset = 0, offset = 0;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const union dbg_idle_chk_reg *regs;
+ u8 reg_id;
+
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
+ cond_regs = &regs[0].cond_reg;
+ info_regs = &regs[rule->num_cond_regs].info_reg;
+
+ /* Dump rule data */
+ if (dump) {
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->rule_id = rule_id;
+ hdr->mem_entry_id = fail_entry_id;
+ hdr->severity = rule->severity;
+ hdr->num_dumped_cond_regs = rule->num_cond_regs;
+ }
+
+ offset += IDLE_CHK_RESULT_HDR_DWORDS;
+
+ /* Dump condition register values */
+ for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id];
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset);
+
+ /* Write register header */
+ if (!dump) {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS +
+ reg->entry_size;
+ continue;
+ }
+
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->start_entry = reg->start_entry;
+ reg_hdr->size = reg->entry_size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM,
+ reg->num_entries > 1 || reg->start_entry > 0 ? 1 : 0);
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id);
+
+ /* Write register values */
+ for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++)
+ dump_buf[offset] = cond_reg_values[next_reg_offset];
+ }
+
+ /* Dump info register values */
+ for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) {
+ const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id];
+ u32 block_id;
+
+ /* Check if register's block is in reset */
+ if (!dump) {
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size;
+ continue;
+ }
+
+ block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID);
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ if (!dev_data->block_in_reset[block_id]) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool wide_bus, eval_mode, mode_match = true;
+ u16 modes_buf_offset;
+ u32 addr;
+
+ reg_hdr = (struct dbg_idle_chk_result_reg_hdr *)
+ (dump_buf + offset);
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ if (eval_mode) {
+ modes_buf_offset =
+ GET_FIELD(reg->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match =
+ qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (!mode_match)
+ continue;
+
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_ADDRESS);
+ wide_bus = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_INFO_REG_WIDE_BUS);
+
+ /* Write register header */
+ offset += IDLE_CHK_RESULT_REG_HDR_DWORDS;
+ hdr->num_dumped_info_regs++;
+ memset(reg_hdr, 0, sizeof(*reg_hdr));
+ reg_hdr->size = reg->size;
+ SET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID,
+ rule->num_cond_regs + reg_id);
+
+ /* Write register values */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ addr,
+ reg->size, wide_bus,
+ SPLIT_TYPE_NONE, 0);
+ }
+ }
+
+ return offset;
+}
+
+/* Dumps idle check rule entries. Returns the dumped size in dwords. */
+static u32
+qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 *dump_buf, bool dump,
+ const struct dbg_idle_chk_rule *input_rules,
+ u32 num_input_rules, u32 *num_failing_rules)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE];
+ u32 i, offset = 0;
+ u16 entry_id;
+ u8 reg_id;
+
+ *num_failing_rules = 0;
+
+ for (i = 0; i < num_input_rules; i++) {
+ const struct dbg_idle_chk_cond_reg *cond_regs;
+ const struct dbg_idle_chk_rule *rule;
+ const union dbg_idle_chk_reg *regs;
+ u16 num_reg_entries = 1;
+ bool check_rule = true;
+ const u32 *imm_values;
+
+ rule = &input_rules[i];
+ regs = (const union dbg_idle_chk_reg *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr +
+ rule->reg_offset;
+ cond_regs = &regs[0].cond_reg;
+ imm_values =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr +
+ rule->imm_offset;
+
+ /* Check if all condition register blocks are out of reset, and
+ * find maximal number of entries (all condition registers that
+ * are memories must have the same size, which is > 1).
+ */
+ for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule;
+ reg_id++) {
+ u32 block_id =
+ GET_FIELD(cond_regs[reg_id].data,
+ DBG_IDLE_CHK_COND_REG_BLOCK_ID);
+
+ if (block_id >= MAX_BLOCK_ID) {
+ DP_NOTICE(p_hwfn, "Invalid block_id\n");
+ return 0;
+ }
+
+ check_rule = !dev_data->block_in_reset[block_id];
+ if (cond_regs[reg_id].num_entries > num_reg_entries)
+ num_reg_entries = cond_regs[reg_id].num_entries;
+ }
+
+ if (!check_rule && dump)
+ continue;
+
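+ /* When only calculating the required buffer size, assume the worst
+ * case: every entry of every condition register fails the rule.
+ */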
+ if (!dump) {
+ u32 entry_dump_size =
+ qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ false,
+ rule->rule_id,
+ rule,
+ 0,
+ NULL);
+
+ offset += num_reg_entries * entry_dump_size;
+ (*num_failing_rules) += num_reg_entries;
+ continue;
+ }
+
+ /* Go over all register entries (number of entries is the same
+ * for all condition registers).
+ */
+ for (entry_id = 0; entry_id < num_reg_entries; entry_id++) {
+ u32 next_reg_offset = 0;
+
+ /* Read current entry of all condition registers */
+ for (reg_id = 0; reg_id < rule->num_cond_regs;
+ reg_id++) {
+ const struct dbg_idle_chk_cond_reg *reg =
+ &cond_regs[reg_id];
+ u32 padded_entry_size, addr;
+ bool wide_bus;
+
+ /* Find GRC address (if it's a memory, the
+ * address of the specific entry is calculated).
+ */
+ addr = GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_ADDRESS);
+ wide_bus =
+ GET_FIELD(reg->data,
+ DBG_IDLE_CHK_COND_REG_WIDE_BUS);
+ if (reg->num_entries > 1 ||
+ reg->start_entry > 0) {
+ padded_entry_size =
+ reg->entry_size > 1 ?
+ roundup_pow_of_two(reg->entry_size) :
+ 1;
+ addr += (reg->start_entry + entry_id) *
+ padded_entry_size;
+ }
+
+ /* Read registers */
+ if (next_reg_offset + reg->entry_size >=
+ IDLE_CHK_MAX_ENTRIES_SIZE) {
+ DP_NOTICE(p_hwfn,
+ "idle check registers entry is too large\n");
+ return 0;
+ }
+
+ next_reg_offset +=
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
+ cond_reg_values +
+ next_reg_offset,
+ dump, addr,
+ reg->entry_size,
+ wide_bus,
+ SPLIT_TYPE_NONE, 0);
+ }
+
+ /* Call the rule's condition function.
+ * If it returns true, the rule failed.
+ */
+ if ((*cond_arr[rule->cond_id]) (cond_reg_values,
+ imm_values)) {
+ offset += qed_idle_chk_dump_failure(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ rule->rule_id,
+ rule,
+ entry_id,
+ cond_reg_values);
+ (*num_failing_rules)++;
+ }
+ }
+ }
+
+ return offset;
+}
+
+/* Performs Idle Check Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct virt_mem_desc *dbg_buf =
+ &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES];
+ u32 num_failing_rules_offset, offset = 0,
+ input_offset = 0, num_failing_rules = 0;
+
+ /* Dump global params - the value 1 must match the number of params dumped below */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "idle-chk");
+
+ /* Dump idle check section header with a single parameter */
+ offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1);
+ num_failing_rules_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0);
+
+ while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) {
+ const struct dbg_idle_chk_cond_hdr *cond_hdr =
+ (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr +
+ input_offset++;
+ bool eval_mode, mode_match = true;
+ u32 curr_failing_rules;
+ u16 modes_buf_offset;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ if (eval_mode) {
+ modes_buf_offset =
+ GET_FIELD(cond_hdr->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ mode_match = qed_is_mode_match(p_hwfn,
+ &modes_buf_offset);
+ }
+
+ if (mode_match) {
+ const struct dbg_idle_chk_rule *rule =
+ (const struct dbg_idle_chk_rule *)((u32 *)
+ dbg_buf->ptr
+ + input_offset);
+ u32 num_input_rules =
+ cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS;
+ offset +=
+ qed_idle_chk_dump_rule_entries(p_hwfn,
+ p_ptt,
+ dump_buf +
+ offset,
+ dump,
+ rule,
+ num_input_rules,
+ &curr_failing_rules);
+ num_failing_rules += curr_failing_rules;
+ }
+
+ input_offset += cond_hdr->data_size;
+ }
+
+ /* Overwrite num_rules parameter */
+ if (dump)
+ qed_dump_num_param(dump_buf + num_failing_rules_offset,
+ dump, "num_rules", num_failing_rules);
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ return offset;
+}
+
+/* Get info on the MCP Trace data in the scratchpad:
+ * - trace_data_grc_addr (OUT): trace data GRC address in bytes
+ * - trace_data_size (OUT): trace data size in bytes (without the header)
+ */
+static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *trace_data_grc_addr,
+ u32 *trace_data_size)
+{
+ u32 spad_trace_offsize, signature;
+
+ /* Read trace section offsize structure from MCP scratchpad */
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Extract trace section address from offsize (in scratchpad) */
+ *trace_data_grc_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize);
+
+ /* Read signature from MCP trace section */
+ signature = qed_rd(p_hwfn, p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, signature));
+
+ if (signature != MFW_TRACE_SIGNATURE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read trace size from MCP trace section */
+ *trace_data_size = qed_rd(p_hwfn,
+ p_ptt,
+ *trace_data_grc_addr +
+ offsetof(struct mcp_trace, size));
+
+ return DBG_STATUS_OK;
+}
+
+/* Reads MCP trace meta data image from NVRAM
+ * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file)
+ * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when
+ * loaded from file).
+ * - trace_meta_size (OUT): size in bytes of the trace meta data.
+ */
+static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 trace_data_size_bytes,
+ u32 *running_bundle_id,
+ u32 *trace_meta_offset,
+ u32 *trace_meta_size)
+{
+ u32 spad_trace_offsize, nvram_image_type, running_mfw_addr;
+
+ /* Read MCP trace section offsize structure from MCP scratchpad */
+ spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR);
+
+ /* Find running bundle ID */
+ running_mfw_addr =
+ MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) +
+ QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes;
+ *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr);
+ if (*running_bundle_id > 1)
+ return DBG_STATUS_INVALID_NVRAM_BUNDLE;
+
+ /* Find image in NVRAM */
+ nvram_image_type =
+ (*running_bundle_id ==
+ DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2;
+ return qed_find_nvram_image(p_hwfn,
+ p_ptt,
+ nvram_image_type,
+ trace_meta_offset,
+ trace_meta_size,
+ true);
+}
+
+/* Reads the MCP Trace meta data from NVRAM into the specified buffer */
+static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_in_bytes,
+ u32 size_in_bytes, u32 *buf)
+{
+ u8 modules_num, module_len, i, *byte_buf = (u8 *)buf;
+ enum dbg_status status;
+ u32 signature;
+
+ /* Read meta data from NVRAM */
+ status = qed_nvram_read(p_hwfn,
+ p_ptt,
+ nvram_offset_in_bytes,
+ size_in_bytes,
+ buf,
+ true);
+ if (status != DBG_STATUS_OK)
+ return status;
+
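+ /* The meta image layout is: signature, module count, then for each
+ * module a length byte followed by that many bytes, and finally a
+ * second signature. Only the signatures are validated here.
+ */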
+ /* Extract and check first signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Extract number of modules */
+ modules_num = *(byte_buf++);
+
+ /* Skip all modules */
+ for (i = 0; i < modules_num; i++) {
+ module_len = *(byte_buf++);
+ byte_buf += module_len;
+ }
+
+ /* Extract and check second signature */
+ signature = qed_read_unaligned_dword(byte_buf);
+ byte_buf += sizeof(signature);
+ if (signature != NVM_MAGIC_VALUE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ return DBG_STATUS_OK;
+}
+
+/* Dump MCP Trace */
+static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords;
+ u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0;
+ u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0;
+ enum dbg_status status;
+ int halted = 0;
+ bool use_mfw;
+
+ *num_dumped_dwords = 0;
+
+ use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP);
+
+ /* Get trace data info */
+ status = qed_mcp_trace_get_data_info(p_hwfn,
+ p_ptt,
+ &trace_data_grc_addr,
+ &trace_data_size_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "mcp-trace");
+
+ /* Halt MCP while reading from scratchpad so the read data will be
+ * consistent. If the halt fails, the MCP trace is taken anyway, with a
+ * small risk that it may be corrupt.
+ */
+ if (dump && use_mfw) {
+ halted = !qed_mcp_halt(p_hwfn, p_ptt);
+ if (!halted)
+ DP_NOTICE(p_hwfn, "MCP halt failed!\n");
+ }
+
+ /* Find trace data size */
+ trace_data_size_dwords =
+ DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace),
+ BYTES_IN_DWORD);
+
+ /* Dump trace data section header and param */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_data", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_data_size_dwords);
+
+ /* Read trace data from scratchpad into dump buffer */
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ dump,
+ BYTES_TO_DWORDS(trace_data_grc_addr),
+ trace_data_size_dwords, false,
+ SPLIT_TYPE_NONE, 0);
+
+ /* Resume MCP (only if halt succeeded) */
+ if (halted && qed_mcp_resume(p_hwfn, p_ptt))
+ DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n");
+
+ /* Dump trace meta section header */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "mcp_trace_meta", 1);
+
+ /* If MCP Trace meta size parameter was set, use it.
+ * Otherwise, read trace meta.
+ * trace_meta_size_bytes is dword-aligned.
+ */
+ trace_meta_size_bytes =
+ qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE);
+ if ((!trace_meta_size_bytes || dump) && use_mfw)
+ status = qed_mcp_trace_get_meta_info(p_hwfn,
+ p_ptt,
+ trace_data_size_bytes,
+ &running_bundle_id,
+ &trace_meta_offset_bytes,
+ &trace_meta_size_bytes);
+ if (status == DBG_STATUS_OK)
+ trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes);
+
+ /* Dump trace meta size param */
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", trace_meta_size_dwords);
+
+ /* Read trace meta image into dump buffer */
+ if (dump && trace_meta_size_dwords)
+ status = qed_mcp_trace_read_meta(p_hwfn,
+ p_ptt,
+ trace_meta_offset_bytes,
+ trace_meta_size_bytes,
+ dump_buf + offset);
+ if (status == DBG_STATUS_OK)
+ offset += trace_meta_size_dwords;
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ *num_dumped_dwords = offset;
+
+ /* If there is no MCP access, indicate that the dump doesn't contain
+ * the meta data from NVRAM.
+ */
+ return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+}
+
+/* Dump GRC FIFO */
+static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "reg-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "reg_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += REG_FIFO_DEPTH_DWORDS;
+ goto out;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+
+ /* Pull available data from the FIFO. Use DMAE since this is widebus
+ * memory and must be accessed atomically. Limit dwords_read to the
+ * FIFO depth so the dump buffer is not overrun, since more entries
+ * could be added to the FIFO while it is being emptied.
+ */
+ addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO);
+ len = REG_FIFO_ELEMENT_DWORDS;
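+ /* Drain the FIFO one element at a time, re-checking the valid-data
+ * register after every read, and stop once the FIFO is empty or
+ * REG_FIFO_DEPTH_DWORDS dwords have been read.
+ */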
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS;
+ dwords_read += REG_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true, SPLIT_TYPE_NONE,
+ 0);
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ GRC_REG_TRACE_FIFO_VALID_DATA) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Dump IGU FIFO */
+static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump, u32 *num_dumped_dwords)
+{
+ u32 dwords_read, size_param_offset, offset = 0, addr, len;
+ bool fifo_has_data;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "igu-fifo");
+
+ /* Dump fifo data section header and param. The size param is 0 for
+ * now, and is overwritten after reading the FIFO.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "igu_fifo_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to
+ * test how much data is available, except for reading it.
+ */
+ offset += IGU_FIFO_DEPTH_DWORDS;
+ goto out;
+ }
+
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+
+ /* Pull available data from the FIFO. Use DMAE since this is widebus
+ * memory and must be accessed atomically. Limit dwords_read to the
+ * FIFO depth so the dump buffer is not overrun, since more entries
+ * could be added to the FIFO while it is being emptied.
+ */
+ addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY);
+ len = IGU_FIFO_ELEMENT_DWORDS;
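+ /* Drained the same way as the GRC reg FIFO above: read element by
+ * element while the valid-data register indicates more data, up to
+ * IGU_FIFO_DEPTH_DWORDS dwords.
+ */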
+ for (dwords_read = 0;
+ fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS;
+ dwords_read += IGU_FIFO_ELEMENT_DWORDS) {
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ len,
+ true, SPLIT_TYPE_NONE,
+ 0);
+ fifo_has_data = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_ERROR_HANDLING_DATA_VALID) > 0;
+ }
+
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ dwords_read);
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Protection Override dump */
+static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 *num_dumped_dwords)
+{
+ u32 size_param_offset, override_window_dwords, offset = 0, addr;
+
+ *num_dumped_dwords = 0;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "protection-override");
+
+ /* Dump data section header and param. The size param is 0 for now,
+ * and is overwritten after reading the data.
+ */
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "protection_override_data", 1);
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+
+ if (!dump) {
+ offset += PROTECTION_OVERRIDE_DEPTH_DWORDS;
+ goto out;
+ }
+
+ /* Add override window info to buffer */
+ override_window_dwords =
+ qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+ PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ true,
+ addr,
+ override_window_dwords,
+ true, SPLIT_TYPE_NONE, 0);
+ qed_dump_num_param(dump_buf + size_param_offset, dump, "size",
+ override_window_dwords);
+ }
+out:
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ *num_dumped_dwords = offset;
+
+ return DBG_STATUS_OK;
+}
+
+/* Performs FW Asserts Dump to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct fw_asserts_ram_section *asserts;
+ char storm_letter_str[2] = "?";
+ struct fw_info fw_info;
+ u32 offset = 0;
+ u8 storm_id;
+
+ /* Dump global params */
+ offset += qed_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset, dump, 1);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "dump-type", "fw-asserts");
+
+ /* Find Storm dump size */
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx;
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+ u32 last_list_idx, addr;
+
+ if (dev_data->block_in_reset[storm->sem_block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info);
+
+ asserts = &fw_info.fw_asserts_section;
+
+ /* Dump FW Asserts section header and params */
+ storm_letter_str[0] = storm->letter;
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "fw_asserts", 2);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump, "storm", storm_letter_str);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "size",
+ asserts->list_element_dword_size);
+
+ /* Read and dump FW Asserts data */
+ if (!dump) {
+ offset += asserts->list_element_dword_size;
+ continue;
+ }
+
+ addr = le16_to_cpu(asserts->section_ram_line_offset);
+ fw_asserts_section_addr = storm->sem_fast_mem_addr +
+ SEM_FAST_REG_INT_RAM +
+ RAM_LINES_TO_BYTES(addr);
+
+ next_list_idx_addr = fw_asserts_section_addr +
+ DWORDS_TO_BYTES(asserts->list_next_index_dword_offset);
+ next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr);
+ last_list_idx = (next_list_idx > 0 ?
+ next_list_idx :
+ asserts->list_num_elements) - 1;
+ addr = BYTES_TO_DWORDS(fw_asserts_section_addr) +
+ asserts->list_dword_offset +
+ last_list_idx * asserts->list_element_dword_size;
+ offset +=
+ qed_grc_dump_addr_range(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, addr,
+ asserts->list_element_dword_size,
+ false, SPLIT_TYPE_NONE, 0);
+ }
+
+ /* Dump last section */
+ offset += qed_dump_last_section(dump_buf, offset, dump);
+
+ return offset;
+}
+
+/* Dumps the specified ILT pages to the specified buffer.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
+ bool *dump, u32 start_page_id,
+ u32 num_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids, u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
+{
+ u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
+ u32 page_id, end_page_id, offset = *given_offset;
+ struct phys_mem_desc *mem_desc = NULL;
+ bool continue_dump = *dump;
+ u32 partial_page_size = 0;
+
+ if (num_pages == 0)
+ return offset;
+
+ end_page_id = start_page_id + num_pages - 1;
+
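+ /* For each allocated page, copy either its page ID (one dword) or its
+ * full contents. The offset keeps advancing even after the buffer
+ * fills up, so the caller can report the full required size, while
+ * actual_dump_size_in_dwords records where copying actually stopped.
+ */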
+ for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
+ mem_desc = &ilt_pages[page_id];
+ if (!ilt_pages[page_id].virt_addr)
+ continue;
+
+ if (dump_page_ids) {
+ /* Copy page ID to dump buffer
+ * (if dump is needed and buffer is not full)
+ */
+ if ((continue_dump) &&
+ (offset + 1 > buf_size_in_dwords)) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ if (continue_dump)
+ *(dump_buf + offset) = page_id;
+ offset++;
+ } else {
+ /* Copy page memory to dump buffer */
+ if ((continue_dump) &&
+ (offset + BYTES_TO_DWORDS(mem_desc->size) >
+ buf_size_in_dwords)) {
+ if (offset + BYTES_TO_DWORDS(mem_desc->size) >
+ buf_size_in_dwords) {
+ partial_page_size =
+ buf_size_in_dwords - offset;
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr,
+ partial_page_size);
+ continue_dump = false;
+ actual_dump_size_in_dwords =
+ offset + partial_page_size;
+ }
+ }
+
+ if (continue_dump)
+ memcpy(dump_buf + offset,
+ mem_desc->virt_addr, mem_desc->size);
+ offset += BYTES_TO_DWORDS(mem_desc->size);
+ }
+ }
+
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
+
+ return offset;
+}
+
+/* Dumps a section containing the dumped ILT pages.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 *given_offset,
+ bool *dump,
+ u32 valid_conn_pf_pages,
+ u32 valid_conn_vf_pages,
+ struct phys_mem_desc *ilt_pages,
+ bool dump_page_ids,
+ u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 pf_start_line, start_page_id, offset = *given_offset;
+ u32 cdut_pf_init_pages, cdut_vf_init_pages;
+ u32 cdut_pf_work_pages, cdut_vf_work_pages;
+ u32 base_data_offset, size_param_offset;
+ u32 src_pages;
+ u32 section_header_and_param_size;
+ u32 cdut_pf_pages, cdut_vf_pages;
+ u32 actual_dump_size_in_dwords;
+ bool continue_dump = *dump;
+ bool update_size = *dump;
+ const char *section_name;
+ u32 i;
+
+ actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
+ section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
+ cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
+ cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
+ cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn);
+ cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn);
+ cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
+ cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
+ pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+ section_header_and_param_size = qed_dump_section_hdr(NULL,
+ false,
+ section_name,
+ 1) +
+ qed_dump_num_param(NULL, false, "size", 0);
+
+ if ((continue_dump) &&
+ (offset + section_header_and_param_size > buf_size_in_dwords)) {
+ continue_dump = false;
+ update_size = false;
+ actual_dump_size_in_dwords = offset;
+ }
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ continue_dump, section_name, 1);
+
+ /* Dump size parameter (0 for now, overwritten with real size later) */
+ size_param_offset = offset;
+ offset += qed_dump_num_param(dump_buf + offset,
+ continue_dump, "size", 0);
+ base_data_offset = offset;
+
+ /* CDUC pages are ordered as follows:
+ * - PF pages - valid section (included in PF connection type mapping)
+ * - PF pages - invalid section (not dumped)
+ * - For each VF in the PF:
+ * - VF pages - valid section (included in VF connection type mapping)
+ * - VF pages - invalid section (not dumped)
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
+ /* Dump connection PF pages */
+ start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, valid_conn_pf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ /* Dump connection VF pages */
+ start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+ /* CDUT pages are ordered as follows:
+ * - PF init pages (not dumped)
+ * - PF work pages
+ * - For each VF in the PF:
+ * - VF init pages (not dumped)
+ * - VF work pages
+ */
+ if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) {
+ /* Dump task PF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_init_pages - pf_start_line;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, cdut_pf_work_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ /* Dump task VF pages */
+ start_page_id = clients[ILT_CLI_CDUT].first.val +
+ cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
+ for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
+ i++, start_page_id += cdut_vf_pages)
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ cdut_vf_work_pages, ilt_pages,
+ dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+ /* Dump Searcher pages */
+ if (clients[ILT_CLI_SRC].active) {
+ start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
+ src_pages = clients[ILT_CLI_SRC].last.val -
+ clients[ILT_CLI_SRC].first.val + 1;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, src_pages, ilt_pages,
+ dump_page_ids, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+ /* Overwrite size param */
+ if (update_size) {
+ u32 section_size = (*dump == continue_dump) ?
+ offset - base_data_offset :
+ actual_dump_size_in_dwords - base_data_offset;
+ if (section_size > 0)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ *dump, "size", section_size);
+ else if ((section_size == 0) && (*dump != continue_dump))
+ actual_dump_size_in_dwords -=
+ section_header_and_param_size;
+ }
+
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
+
+ return offset;
+}
+
+/* Dumps a section containing the global parameters.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32
+qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 cduc_page_size,
+ u32 conn_ctx_size,
+ u32 cdut_page_size,
+ u32 *full_dump_size_param_offset,
+ u32 *actual_dump_size_param_offset)
+{
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+ u32 offset = 0;
+
+ offset += qed_dump_common_global_params(p_hwfn, p_ptt,
+ dump_buf + offset,
+ dump, 30);
+ offset += qed_dump_str_param(dump_buf + offset,
+ dump,
+ "dump-type", "ilt-dump");
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-page-size",
+ cduc_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-first-page-id",
+ clients[ILT_CLI_CDUC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-last-page-id",
+ clients[ILT_CLI_CDUC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-pf-pages",
+ clients[ILT_CLI_CDUC].pf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cduc-num-vf-pages",
+ clients[ILT_CLI_CDUC].vf_total_lines);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-conn-ctx-size",
+ conn_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-page-size",
+ cdut_page_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-first-page-id",
+ clients[ILT_CLI_CDUT].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-last-page-id",
+ clients[ILT_CLI_CDUT].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-init-pages",
+ qed_get_cdut_num_pf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-init-pages",
+ qed_get_cdut_num_vf_init_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-pf-work-pages",
+ qed_get_cdut_num_pf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "cdut-num-vf-work-pages",
+ qed_get_cdut_num_vf_work_pages(p_hwfn));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "max-task-ctx-size",
+ p_hwfn->p_cxt_mngr->task_ctx_size);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "first-vf-id-in-pf",
+ p_hwfn->p_cxt_mngr->first_vf_in_pf);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes",
+ sizeof(void *));
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "pf-start-line",
+ p_hwfn->p_cxt_mngr->pf_start_line);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "page-mem-desc-size-dwords",
+ PAGE_MEM_DESC_SIZE_DWORDS);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ilt-shadow-size",
+ p_hwfn->p_cxt_mngr->ilt_shadow_size);
+
+ *full_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "dump-size-full", 0);
+
+ *actual_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "dump-size-actual", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "iscsi_task_pages",
+ p_hwfn->p_cxt_mngr->iscsi_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fcoe_task_pages",
+ p_hwfn->p_cxt_mngr->fcoe_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "roce_task_pages",
+ p_hwfn->p_cxt_mngr->roce_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "eth_task_pages",
+ p_hwfn->p_cxt_mngr->eth_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-first-page-id",
+ clients[ILT_CLI_SRC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-last-page-id",
+ clients[ILT_CLI_SRC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-is-active",
+ clients[ILT_CLI_SRC].active);
+
+ /* Adding or removing parameters here requires updating the parameter
+ * count passed to qed_dump_common_global_params() above.
+ */
+
+ return offset;
+}
+
+/* Dumps a section containing the number of PF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_pf_cids)
+{
+ u32 num_pf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ dump, "num_pf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
+ if (dump)
+ *(dump_buf + offset) = num_pf_cids;
+ *valid_conn_pf_cids += num_pf_cids;
+ }
+
+ return offset;
+}
+
+/* Dumps a section containing the number of VF CIDs per connection type.
+ * Part of the ILT dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_vf_cids)
+{
+ u32 num_vf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "num_vf_cids_per_conn_type", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_vf_cids =
+ p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
+ if (dump)
+ *(dump_buf + offset) = num_vf_cids;
+ *valid_conn_vf_cids += num_vf_cids;
+ }
+
+ return offset;
+}
+
+/* Performs ILT Dump to the specified buffer.
+ * buf_size_in_dwords - the size of the dump buffer, in dwords.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
+{
+#if ((!defined VMWARE) && (!defined UEFI))
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+#endif
+ u32 valid_conn_vf_cids = 0,
+ valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
+ u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ u32 actual_dump_size_in_dwords = 0;
+ struct phys_mem_desc *ilt_pages;
+ u32 actul_dump_off = 0;
+ u32 last_section_size;
+ u32 full_dump_off = 0;
+ u32 section_size = 0;
+ bool continue_dump;
+ u32 page_id;
+
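+ /* Calling qed_dump_last_section() with a NULL buffer returns only its
+ * size; it is computed up front so that room for the CRC section can
+ * be reserved before anything else is dumped.
+ */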
+ last_section_size = qed_dump_last_section(NULL, 0, false);
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
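+ /* A CDUC page holds num_cids_per_page connection contexts; the valid
+ * PF/VF CID counts gathered below are converted to page counts with
+ * DIV_ROUND_UP() further down.
+ */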
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ continue_dump = dump;
+
+ /* If dumping, reserve room for the last section
+ * (the last section contains a CRC of the dumped data).
+ */
+ if (dump) {
+ if (buf_size_in_dwords >= last_section_size) {
+ buf_size_in_dwords -= last_section_size;
+ } else {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ /* Dump global params */
+
+ /* If dumping, first calculate the size of this section without
+ * dumping it and check that it fits in the remaining dump buffer.
+ * If there is not enough room, stop dumping.
+ */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ NULL,
+ false,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ continue_dump,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+
+ /* Dump section containing the number of PF CIDs per connection type.
+ * If dumping, first check that there is enough room in the dump
+ * buffer for this section.
+ */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_pf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_pf_cids);
+
+ /* Dump section containing the number of VF CIDs per connection type.
+ * If dumping, first check that there is enough room in the dump
+ * buffer for this section.
+ */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_vf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_vf_cids);
+
+ /* Dump section containing physical memory descriptors for each
+ * ILT page.
+ */
+ num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+
+ /* If dumping, first check that there is enough room in the dump
+ * buffer for the section header.
+ */
+ if (continue_dump) {
+ section_size = qed_dump_section_hdr(NULL,
+ false,
+ "ilt_page_desc",
+ 1) +
+ qed_dump_num_param(NULL,
+ false,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ continue_dump, "ilt_page_desc", 1);
+ offset += qed_dump_num_param(dump_buf + offset,
+ continue_dump,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+
+ /* Copy memory descriptors to the dump buffer.
+ * If dumping, copy only as much as fits in the dump buffer.
+ */
+ if (continue_dump) {
+ for (page_id = 0; page_id < num_pages;
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
+ if (continue_dump &&
+ (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
+ buf_size_in_dwords)) {
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES
+ (PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ if (continue_dump) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+ }
+ } else {
+ offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
+ }
+
+ valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids,
+ num_cids_per_page);
+ valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids,
+ num_cids_per_page);
+
+ /* Dump ILT pages IDs */
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, true, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ /* Dump ILT pages memory */
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, false, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ real_dumped_size =
+ (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
+ qed_dump_num_param(dump_buf + full_dump_off, dump,
+ "full-dump-size", offset + last_section_size);
+ qed_dump_num_param(dump_buf + actul_dump_off,
+ dump,
+ "actual-dump-size",
+ real_dumped_size + last_section_size);
+
+ /* Dump last section */
+ real_dumped_size += qed_dump_last_section(dump_buf,
+ real_dumped_size, dump);
+
+ return real_dumped_size;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
+{
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
+ u8 buf_id;
+
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_dbg_set_app_ver(u32 ver)
+{
+ if (ver < TOOLS_VERSION)
+ return DBG_STATUS_UNSUPPORTED_APP_VERSION;
+
+ s_app_ver = ver;
+
+ return DBG_STATUS_OK;
+}
+
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) {
+ struct storm_defs *storm = &s_storm_defs[storm_id];
+
+ /* Skip Storm if it's in reset */
+ if (dev_data->block_in_reset[storm->sem_block_id])
+ continue;
+
+ /* Read FW info for the current Storm */
+ qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info);
+
+ return true;
+ }
+
+ return false;
+}
+
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ enum dbg_status status;
+ int i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val);
+
+ status = qed_dbg_dev_init(p_hwfn);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Initializes the GRC parameters (if not initialized). Needed in order
+ * to set the default parameter values for the first time.
+ */
+ qed_dbg_grc_init_params(p_hwfn);
+
+ if (grc_param >= MAX_DBG_GRC_PARAMS)
+ return DBG_STATUS_INVALID_ARGS;
+ if (val < s_grc_param_defs[grc_param].min ||
+ val > s_grc_param_defs[grc_param].max)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (s_grc_param_defs[grc_param].is_preset) {
+ /* Preset param */
+
+ /* Disabling a preset is not allowed. Call
+ * dbg_grc_set_params_default instead.
+ */
+ if (!val)
+ return DBG_STATUS_INVALID_ARGS;
+
+ /* Update all params with the preset values */
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) {
+ struct grc_param_defs *defs = &s_grc_param_defs[i];
+ u32 preset_val;
+ /* Skip persistent params */
+ if (defs->is_persistent)
+ continue;
+
+ /* Find preset value */
+ if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL)
+ preset_val =
+ defs->exclude_all_preset_val;
+ else if (grc_param == DBG_GRC_PARAM_CRASH)
+ preset_val =
+ defs->crash_preset_val[dev_data->chip_id];
+ else
+ return DBG_STATUS_INVALID_ARGS;
+
+ qed_grc_set_param(p_hwfn, i, preset_val);
+ }
+ } else {
+ /* Regular param - set its value */
+ qed_grc_set_param(p_hwfn, grc_param, val);
+ }
+
+ return DBG_STATUS_OK;
+}
+
+/* Assign default GRC param values */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ u32 i;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ if (!s_grc_param_defs[i].is_persistent)
+ dev_data->grc.param_val[i] =
+ s_grc_param_defs[i].default_val[dev_data->chip_id];
+}
+
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_grc_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Does nothing; only needed for compile-time asserts */
+ qed_static_asserts();
+
+ /* GRC Dump */
+ status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
+}
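+
+/* The per-feature dump entry points below follow the same general flow as
+ * qed_dbg_grc_dump() above: query the required buffer size, verify that the
+ * caller's buffer is large enough, perform the dump, and finally restore the
+ * default GRC params.
+ */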
+
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ struct idle_chk_data *idle_chk = &dev_data->idle_chk;
+ enum dbg_status status;
+
+ *buf_size = 0;
+
+ status = qed_dbg_dev_init(p_hwfn);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ if (!idle_chk->buf_size_set) {
+ idle_chk->buf_size = qed_idle_chk_dump(p_hwfn,
+ p_ptt, NULL, false);
+ idle_chk->buf_size_set = true;
+ }
+
+ *buf_size = idle_chk->buf_size;
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_grc_unreset_blocks(p_hwfn, p_ptt, true);
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Idle Check Dump */
+ *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ status =
+ qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK && status !=
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ /* Perform dump */
+ status = qed_mcp_trace_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
+}
+
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ status = qed_reg_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
+}
+
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ &needed_buf_size_in_dwords);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ status = qed_igu_fifo_dump(p_hwfn,
+ p_ptt, dump_buf, true, num_dumped_dwords);
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
+}
+
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ return qed_protection_override_dump(p_hwfn,
+ p_ptt, NULL, false, buf_size);
+}
+
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_protection_override_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ status = qed_protection_override_dump(p_hwfn,
+ p_ptt,
+ dump_buf,
+ true, num_dumped_dwords);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return status;
+}
+
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ /* Update reset state */
+ qed_update_blocks_reset_state(p_hwfn, p_ptt);
+
+ *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false);
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords;
+ enum dbg_status status;
+
+ *num_dumped_dwords = 0;
+
+ status =
+ qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn,
+ p_ptt,
+ p_size);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (buf_size_in_dwords < needed_buf_size_in_dwords)
+ return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+
+ *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+
+ *buf_size = 0;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords)
+{
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn,
+ p_ptt,
+ dump_buf, buf_size_in_dwords, true);
+
+ /* Revert GRC params to their default */
+ qed_dbg_grc_set_params_default(p_hwfn);
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block_id,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results)
+{
+ enum dbg_status status = qed_dbg_dev_init(p_hwfn);
+ u8 reg_idx, num_attn_regs, num_result_regs = 0;
+ const struct dbg_attn_reg *attn_reg_arr;
+
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ attn_reg_arr = qed_get_block_attn_regs(p_hwfn,
+ block_id,
+ attn_type, &num_attn_regs);
+
+ for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) {
+ const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx];
+ struct dbg_attn_reg_result *reg_result;
+ u32 sts_addr, sts_val;
+ u16 modes_buf_offset;
+ bool eval_mode;
+
+ /* Check mode */
+ eval_mode = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_EVAL_MODE) > 0;
+ modes_buf_offset = GET_FIELD(reg_data->mode.data,
+ DBG_MODE_HDR_MODES_BUF_OFFSET);
+ if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset))
+ continue;
+
+ /* Mode match - read attention status register */
+ sts_addr = DWORDS_TO_BYTES(clear_status ?
+ reg_data->sts_clr_address :
+ GET_FIELD(reg_data->data,
+ DBG_ATTN_REG_STS_ADDRESS));
+ sts_val = qed_rd(p_hwfn, p_ptt, sts_addr);
+ if (!sts_val)
+ continue;
+
+ /* Non-zero attention status - add to results */
+ reg_result = &results->reg_results[num_result_regs];
+ SET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr);
+ SET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_NUM_REG_ATTN,
+ GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN));
+ reg_result->block_attn_offset = reg_data->block_attn_offset;
+ reg_result->sts_val = sts_val;
+ reg_result->mask_val = qed_rd(p_hwfn,
+ p_ptt,
+ DWORDS_TO_BYTES
+ (reg_data->mask_address));
+ num_result_regs++;
+ }
+
+ results->block_id = (u8)block_id;
+ results->names_offset =
+ qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset;
+ SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type);
+ SET_FIELD(results->data,
+ DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs);
+
+ return DBG_STATUS_OK;
+}
+
+/******************************* Data Types **********************************/
+
+/* REG fifo element */
+struct reg_fifo_element {
+ u64 data;
+#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0
+#define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff
+#define REG_FIFO_ELEMENT_ACCESS_SHIFT 23
+#define REG_FIFO_ELEMENT_ACCESS_MASK 0x1
+#define REG_FIFO_ELEMENT_PF_SHIFT 24
+#define REG_FIFO_ELEMENT_PF_MASK 0xf
+#define REG_FIFO_ELEMENT_VF_SHIFT 28
+#define REG_FIFO_ELEMENT_VF_MASK 0xff
+#define REG_FIFO_ELEMENT_PORT_SHIFT 36
+#define REG_FIFO_ELEMENT_PORT_MASK 0x3
+#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38
+#define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3
+#define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40
+#define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7
+#define REG_FIFO_ELEMENT_MASTER_SHIFT 43
+#define REG_FIFO_ELEMENT_MASTER_MASK 0xf
+#define REG_FIFO_ELEMENT_ERROR_SHIFT 47
+#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f
+};
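+
+/* Bit layout of the 64-bit REG FIFO element, derived from the shift/mask
+ * definitions above:
+ *   [22:0]  address       [23]    access        [27:24] pf
+ *   [35:28] vf            [37:36] port          [39:38] privilege
+ *   [42:40] protection    [46:43] master        [51:47] error
+ */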
+
+/* REG fifo error element */
+struct reg_fifo_err {
+ u32 err_code;
+ const char *err_msg;
+};
+
+/* IGU fifo element */
+struct igu_fifo_element {
+ u32 dword0;
+#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8
+#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9
+#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13
+#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17
+#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff
+ u32 dword1;
+ u32 dword2;
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0
+#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1
+#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff
+ u32 reserved;
+};
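+
+/* In the IGU FIFO element, dword0 holds (per the definitions above) the FID
+ * in bits [7:0], IS_PF in bit [8], SOURCE in bits [12:9], ERR_TYPE in bits
+ * [16:13] and CMD_ADDR in bits [31:17]. The DWORD12 fields apply to the
+ * 64-bit value formed by dword2:dword1 (see qed_parse_igu_fifo_element()).
+ */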
+
+struct igu_fifo_wr_data {
+ u32 data;
+#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0
+#define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24
+#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25
+#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3
+#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27
+#define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1
+#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28
+#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1
+#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+struct igu_fifo_cleanup_wr_data {
+ u32 data;
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0
+#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28
+#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31
+#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1
+};
+
+/* Protection override element */
+struct protection_override_element {
+ u64 data;
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0
+#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23
+#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff
+#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47
+#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49
+#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52
+#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7
+};
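+
+/* Bit layout of the 64-bit protection override element, derived from the
+ * shift/mask definitions above:
+ *   [22:0]  address            [46:23] window size (in regs)
+ *   [47]    read               [48]    write
+ *   [51:49] read protection    [54:52] write protection
+ */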
+
+enum igu_fifo_sources {
+ IGU_SRC_PXP0,
+ IGU_SRC_PXP1,
+ IGU_SRC_PXP2,
+ IGU_SRC_PXP3,
+ IGU_SRC_PXP4,
+ IGU_SRC_PXP5,
+ IGU_SRC_PXP6,
+ IGU_SRC_PXP7,
+ IGU_SRC_CAU,
+ IGU_SRC_ATTN,
+ IGU_SRC_GRC
+};
+
+enum igu_fifo_addr_types {
+ IGU_ADDR_TYPE_MSIX_MEM,
+ IGU_ADDR_TYPE_WRITE_PBA,
+ IGU_ADDR_TYPE_WRITE_INT_ACK,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS,
+ IGU_ADDR_TYPE_READ_INT,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE,
+ IGU_ADDR_TYPE_RESERVED
+};
+
+struct igu_fifo_addr_data {
+ u16 start_addr;
+ u16 end_addr;
+ char *desc;
+ char *vf_desc;
+ enum igu_fifo_addr_types type;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_MSG_LEN 1024
+
+#define MCP_TRACE_MAX_MODULE_LEN 8
+#define MCP_TRACE_FORMAT_MAX_PARAMS 3
+#define MCP_TRACE_FORMAT_PARAM_WIDTH \
+ (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET)
+
+#define REG_FIFO_ELEMENT_ADDR_FACTOR 4
+#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127
+
+#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
+
+/***************************** Constant Arrays *******************************/
+
+/* Status string array */
+static const char * const s_status_str[] = {
+ /* DBG_STATUS_OK */
+ "Operation completed successfully",
+
+ /* DBG_STATUS_APP_VERSION_NOT_SET */
+ "Debug application version wasn't set",
+
+ /* DBG_STATUS_UNSUPPORTED_APP_VERSION */
+ "Unsupported debug application version",
+
+ /* DBG_STATUS_DBG_BLOCK_NOT_RESET */
+ "The debug block wasn't reset since the last recording",
+
+ /* DBG_STATUS_INVALID_ARGS */
+ "Invalid arguments",
+
+ /* DBG_STATUS_OUTPUT_ALREADY_SET */
+ "The debug output was already set",
+
+ /* DBG_STATUS_INVALID_PCI_BUF_SIZE */
+ "Invalid PCI buffer size",
+
+ /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */
+ "PCI buffer allocation failed",
+
+ /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */
+ "A PCI buffer wasn't allocated",
+
+ /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
+ "The filter/trigger constraint dword offsets are not enabled for recording",
+ /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+ "No matching framing mode",
+
+ /* DBG_STATUS_VFC_READ_ERROR */
+ "Error reading from VFC",
+
+ /* DBG_STATUS_STORM_ALREADY_ENABLED */
+ "The Storm was already enabled",
+
+ /* DBG_STATUS_STORM_NOT_ENABLED */
+ "The specified Storm wasn't enabled",
+
+ /* DBG_STATUS_BLOCK_ALREADY_ENABLED */
+ "The block was already enabled",
+
+ /* DBG_STATUS_BLOCK_NOT_ENABLED */
+ "The specified block wasn't enabled",
+
+ /* DBG_STATUS_NO_INPUT_ENABLED */
+ "No input was enabled for recording",
+
+ /* DBG_STATUS_NO_FILTER_TRIGGER_256B */
+ "Filters and triggers are not allowed in E4 256-bit mode",
+
+ /* DBG_STATUS_FILTER_ALREADY_ENABLED */
+ "The filter was already enabled",
+
+ /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */
+ "The trigger was already enabled",
+
+ /* DBG_STATUS_TRIGGER_NOT_ENABLED */
+ "The trigger wasn't enabled",
+
+ /* DBG_STATUS_CANT_ADD_CONSTRAINT */
+ "A constraint can be added only after a filter was enabled or a trigger state was added",
+
+ /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */
+ "Cannot add more than 3 trigger states",
+
+ /* DBG_STATUS_TOO_MANY_CONSTRAINTS */
+ "Cannot add more than 4 constraints per filter or trigger state",
+
+ /* DBG_STATUS_RECORDING_NOT_STARTED */
+ "The recording wasn't started",
+
+ /* DBG_STATUS_DATA_DIDNT_TRIGGER */
+ "A trigger was configured, but it didn't trigger",
+
+ /* DBG_STATUS_NO_DATA_RECORDED */
+ "No data was recorded",
+
+ /* DBG_STATUS_DUMP_BUF_TOO_SMALL */
+ "Dump buffer is too small",
+
+ /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */
+ "Dumped data is not aligned to chunks",
+
+ /* DBG_STATUS_UNKNOWN_CHIP */
+ "Unknown chip",
+
+ /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */
+ "Failed allocating virtual memory",
+
+ /* DBG_STATUS_BLOCK_IN_RESET */
+ "The input block is in reset",
+
+ /* DBG_STATUS_INVALID_TRACE_SIGNATURE */
+ "Invalid MCP trace signature found in NVRAM",
+
+ /* DBG_STATUS_INVALID_NVRAM_BUNDLE */
+ "Invalid bundle ID found in NVRAM",
+
+ /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */
+ "Failed getting NVRAM image",
+
+ /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */
+ "NVRAM image is not dword-aligned",
+
+ /* DBG_STATUS_NVRAM_READ_FAILED */
+ "Failed reading from NVRAM",
+
+ /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */
+ "Idle check parsing failed",
+
+ /* DBG_STATUS_MCP_TRACE_BAD_DATA */
+ "MCP Trace data is corrupt",
+
+ /* DBG_STATUS_MCP_TRACE_NO_META */
+ "Dump doesn't contain meta data - it must be provided in image file",
+
+ /* DBG_STATUS_MCP_COULD_NOT_HALT */
+ "Failed to halt MCP",
+
+ /* DBG_STATUS_MCP_COULD_NOT_RESUME */
+ "Failed to resume MCP after halt",
+
+ /* DBG_STATUS_RESERVED0 */
+ "",
+
+ /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */
+ "Failed to empty SEMI sync FIFO",
+
+ /* DBG_STATUS_IGU_FIFO_BAD_DATA */
+ "IGU FIFO data is corrupt",
+
+ /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */
+ "MCP failed to mask parities",
+
+ /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */
+ "FW Asserts parsing failed",
+
+ /* DBG_STATUS_REG_FIFO_BAD_DATA */
+ "GRC FIFO data is corrupt",
+
+ /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */
+ "Protection Override data is corrupt",
+
+ /* DBG_STATUS_DBG_ARRAY_NOT_SET */
+ "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)",
+
+ /* DBG_STATUS_RESERVED1 */
+ "",
+
+ /* DBG_STATUS_NON_MATCHING_LINES */
+ "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)",
+
+ /* DBG_STATUS_INSUFFICIENT_HW_IDS */
+ "Insufficient HW IDs. Try to record less Storms/blocks",
+
+ /* DBG_STATUS_DBG_BUS_IN_USE */
+ "The debug bus is in use",
+
+ /* DBG_STATUS_INVALID_STORM_DBG_MODE */
+ "The storm debug mode is not supported in the current chip",
+
+ /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */
+ "Other engine is supported only in BB",
+
+ /* DBG_STATUS_FILTER_SINGLE_HW_ID */
+ "The configured filter mode requires a single Storm/block input",
+
+ /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */
+ "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
+
+ /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
+ "When triggering on Storm data, the Storm to trigger on must be specified",
+
+ /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
+ "Failed to request MDUMP2 Offsize",
+
+ /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
+ "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
+
+ /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
+ "Invalid Signature found at start of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
+ "Invalid Log Size of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
+ "Invalid Log Header of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
+ "Invalid Log Data of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
+ "Could not extract number of ports from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
+ "Could not extract MFW (link) status from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
+ "Could not display linkdump of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
+ "Could not read PHY CFG of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
+ "Could not read PLL Mode of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
+ "Could not read TSCF/TSCE Lane Regs of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
+ "Could not allocate MDUMP2 reg-val internal buffer"
+};
+
+/* Idle check severity names array */
+static const char * const s_idle_chk_severity_str[] = {
+ "Error",
+ "Error if no traffic",
+ "Warning"
+};
+
+/* MCP Trace level names array */
+static const char * const s_mcp_trace_level_str[] = {
+ "ERROR",
+ "TRACE",
+ "DEBUG"
+};
+
+/* Access type names array */
+static const char * const s_access_strs[] = {
+ "read",
+ "write"
+};
+
+/* Privilege type names array */
+static const char * const s_privilege_strs[] = {
+ "VF",
+ "PDA",
+ "HV",
+ "UA"
+};
+
+/* Protection type names array */
+static const char * const s_protection_strs[] = {
+ "(default)",
+ "(default)",
+ "(default)",
+ "(default)",
+ "override VF",
+ "override PDA",
+ "override HV",
+ "override UA"
+};
+
+/* Master type names array */
+static const char * const s_master_strs[] = {
+ "???",
+ "pxp",
+ "mcp",
+ "msdm",
+ "psdm",
+ "ysdm",
+ "usdm",
+ "tsdm",
+ "xsdm",
+ "dbu",
+ "dmae",
+ "jdap",
+ "???",
+ "???",
+ "???",
+ "???"
+};
+
+/* REG FIFO error messages array */
+static struct reg_fifo_err s_reg_fifo_errors[] = {
+ {1, "grc timeout"},
+ {2, "address doesn't belong to any block"},
+ {4, "reserved address in block or write to read-only address"},
+ {8, "privilege/protection mismatch"},
+ {16, "path isolation error"},
+ {17, "RSL error"}
+};
+
+/* IGU FIFO sources array */
+static const char * const s_igu_fifo_source_strs[] = {
+ "TSTORM",
+ "MSTORM",
+ "USTORM",
+ "XSTORM",
+ "YSTORM",
+ "PSTORM",
+ "PCIE",
+ "NIG_QM_PBF",
+ "CAU",
+ "ATTN",
+ "GRC",
+};
+
+/* IGU FIFO error messages */
+static const char * const s_igu_fifo_error_strs[] = {
+ "no error",
+ "length error",
+ "function disabled",
+ "VF sent command to attention address",
+ "host sent prod update command",
+ "read of during interrupt register while in MIMD mode",
+ "access to PXP BAR reserved address",
+ "producer update command to attention index",
+ "unknown error",
+ "SB index not valid",
+ "SB relative index and FID not found",
+ "FID not match",
+ "command with error flag asserted (PCI error or CAU discard)",
+ "VF sent cleanup and RF cleanup is disabled",
+ "cleanup command on type bigger than 4"
+};
+
+/* IGU FIFO address data */
+static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = {
+ {0x0, 0x101, "MSI-X Memory", NULL,
+ IGU_ADDR_TYPE_MSIX_MEM},
+ {0x102, 0x1ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x200, 0x200, "Write PBA[0:63]", NULL,
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x201, 0x201, "Write PBA[64:127]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x202, 0x202, "Write PBA[128]", "reserved",
+ IGU_ADDR_TYPE_WRITE_PBA},
+ {0x203, 0x3ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x400, 0x5ef, "Write interrupt acknowledgment", NULL,
+ IGU_ADDR_TYPE_WRITE_INT_ACK},
+ {0x5f0, 0x5f0, "Attention bits update", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f1, 0x5f1, "Attention bits set", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f2, 0x5f2, "Attention bits clear", NULL,
+ IGU_ADDR_TYPE_WRITE_ATTN_BITS},
+ {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL,
+ IGU_ADDR_TYPE_READ_INT},
+ {0x5f7, 0x5ff, "reserved", NULL,
+ IGU_ADDR_TYPE_RESERVED},
+ {0x600, 0x7ff, "Producer update", NULL,
+ IGU_ADDR_TYPE_WRITE_PROD_UPDATE}
+};
+
+/******************************** Variables **********************************/
+
+/* Temporary buffer, used for print size calculations */
+static char s_temp_buf[MAX_MSG_LEN];
+
+/**************************** Private Functions ******************************/
+
+static void qed_user_static_asserts(void)
+{
+}
+
+static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
+{
+ return (a + b) % size;
+}
+
+static u32 qed_cyclic_sub(u32 a, u32 b, u32 size)
+{
+ return (size + a - b) % size;
+}
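+
+/* Example: with size = 8, qed_cyclic_add(6, 3, 8) == 1 and
+ * qed_cyclic_sub(1, 3, 8) == 6, i.e. offsets wrap around the buffer end.
+ */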
+
+/* Reads the specified number of bytes (up to 4) from the specified cyclic
+ * buffer and returns them as a dword value. The specified buffer offset is
+ * updated.
+ */
+static u32 qed_read_from_cyclic_buf(void *buf,
+ u32 *offset,
+ u32 buf_size, u8 num_bytes_to_read)
+{
+ u8 i, *val_ptr, *bytes_buf = (u8 *)buf;
+ u32 val = 0;
+
+ val_ptr = (u8 *)&val;
+
+ /* Assume running on a LITTLE ENDIAN host while the buffer is in network
+ * order (BIG ENDIAN), i.e. high-order bytes are placed at lower memory
+ * addresses.
+ */
+ for (i = 0; i < num_bytes_to_read; i++) {
+ val_ptr[i] = bytes_buf[*offset];
+ *offset = qed_cyclic_add(*offset, 1, buf_size);
+ }
+
+ return val;
+}
+
+/* Reads and returns the next byte from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u8 qed_read_byte_from_buf(void *buf, u32 *offset)
+{
+ return ((u8 *)buf)[(*offset)++];
+}
+
+/* Reads and returns the next dword from the specified buffer.
+ * The specified buffer offset is updated.
+ */
+static u32 qed_read_dword_from_buf(void *buf, u32 *offset)
+{
+ u32 dword_val = *(u32 *)&((u8 *)buf)[*offset];
+
+ *offset += 4;
+
+ return dword_val;
+}
+
+/* Reads the next string from the specified buffer, and copies it to the
+ * specified pointer. The specified buffer offset is updated.
+ */
+static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest)
+{
+ const char *source_str = &((const char *)buf)[*offset];
+
+ strncpy(dest, source_str, size);
+ dest[size - 1] = '\0';
+ *offset += size;
+}
+
+/* Returns a pointer to the specified offset (in bytes) of the specified buffer.
+ * If the specified buffer is NULL, a temporary buffer pointer is returned.
+ */
+static char *qed_get_buf_ptr(void *buf, u32 offset)
+{
+ return buf ? (char *)buf + offset : s_temp_buf;
+}
+
+/* Reads a param from the specified buffer. Returns the number of dwords read.
+ * If the returned param_str_val is NULL, the param is numeric and its value is
+ * returned in param_num_val.
+ * Otherwise, the param is a string and its pointer is returned in
+ * param_str_val.
+ */
+static u32 qed_read_param(u32 *dump_buf,
+ const char **param_name,
+ const char **param_str_val, u32 *param_num_val)
+{
+ char *char_buf = (char *)dump_buf;
+ size_t offset = 0;
+
+ /* Extract param name */
+ *param_name = char_buf;
+ offset += strlen(*param_name) + 1;
+
+ /* Check param type */
+ if (*(char_buf + offset++)) {
+ /* String param */
+ *param_str_val = char_buf + offset;
+ *param_num_val = 0;
+ offset += strlen(*param_str_val) + 1;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ } else {
+ /* Numeric param */
+ *param_str_val = NULL;
+ if (offset & 0x3)
+ offset += (4 - (offset & 0x3));
+ *param_num_val = *(u32 *)(char_buf + offset);
+ offset += 4;
+ }
+
+ return (u32)offset / 4;
+}
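+
+/* For illustration (per the parsing above): a numeric param such as
+ * "num_rules" = 5 is encoded as the name string and its NULL terminator,
+ * a zero type byte, padding up to the next dword boundary, and the 32-bit
+ * value - 16 bytes (4 dwords) in total. A string param uses a non-zero type
+ * byte, followed by the value string and padding to a dword boundary.
+ */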
+
+/* Reads a section header from the specified buffer.
+ * Returns the number of dwords read.
+ */
+static u32 qed_read_section_hdr(u32 *dump_buf,
+ const char **section_name,
+ u32 *num_section_params)
+{
+ const char *param_str_val;
+
+ return qed_read_param(dump_buf,
+ section_name, &param_str_val, num_section_params);
+}
+
+/* Reads section params from the specified buffer and prints them to the results
+ * buffer. Returns the number of dwords read.
+ */
+static u32 qed_print_section_params(u32 *dump_buf,
+ u32 num_section_params,
+ char *results_buf, u32 *num_chars_printed)
+{
+ u32 i, dump_offset = 0, results_offset = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ const char *param_name, *param_str_val;
+ u32 param_num_val = 0;
+
+ dump_offset += qed_read_param(dump_buf + dump_offset,
+ &param_name,
+ &param_str_val, &param_num_val);
+
+ if (param_str_val)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %s\n", param_name, param_str_val);
+ else if (strcmp(param_name, "fw-timestamp"))
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%s: %d\n", param_name, param_num_val);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset),
+ "\n");
+
+ *num_chars_printed = results_offset;
+
+ return dump_offset;
+}
+
+/* Returns the block name that matches the specified block ID,
+ * or NULL if not found.
+ */
+static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn,
+ enum block_id block_id)
+{
+ const struct dbg_block_user *block =
+ (const struct dbg_block_user *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id;
+
+ return (const char *)block->name;
+}
+
+static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn
+ *p_hwfn)
+{
+ return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info;
+}
+
+/* Parses the idle check rules and returns the number of characters printed.
+ * In case of parsing error, returns 0.
+ */
+static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 *dump_buf_end,
+ u32 num_rules,
+ bool print_fw_idle_chk,
+ char *results_buf,
+ u32 *num_errors, u32 *num_warnings)
+{
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
+
+ u32 rule_idx;
+ u16 i, j;
+
+ *num_errors = 0;
+ *num_warnings = 0;
+
+ /* Go over dumped results */
+ for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end;
+ rule_idx++) {
+ const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data;
+ struct dbg_idle_chk_result_hdr *hdr;
+ const char *parsing_str, *lsi_msg;
+ u32 parsing_str_offset;
+ bool has_fw_msg;
+ u8 curr_reg_id;
+
+ hdr = (struct dbg_idle_chk_result_hdr *)dump_buf;
+ rule_parsing_data =
+ (const struct dbg_idle_chk_rule_parsing_data *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr +
+ hdr->rule_id;
+ parsing_str_offset =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET);
+ has_fw_msg =
+ GET_FIELD(rule_parsing_data->data,
+ DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0;
+ parsing_str = (const char *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr +
+ parsing_str_offset;
+ lsi_msg = parsing_str;
+ curr_reg_id = 0;
+
+ if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES)
+ return 0;
+
+ /* Skip rule header */
+ dump_buf += BYTES_TO_DWORDS(sizeof(*hdr));
+
+ /* Update errors/warnings count */
+ if (hdr->severity == IDLE_CHK_SEVERITY_ERROR ||
+ hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC)
+ (*num_errors)++;
+ else
+ (*num_warnings)++;
+
+ /* Print rule severity */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s: ",
+ s_idle_chk_severity_str[hdr->severity]);
+
+ /* Print rule message */
+ if (has_fw_msg)
+ parsing_str += strlen(parsing_str) + 1;
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "%s.",
+ has_fw_msg &&
+ print_fw_idle_chk ? parsing_str : lsi_msg);
+ parsing_str += strlen(parsing_str) + 1;
+
+ /* Print register values */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " Registers:");
+ for (i = 0;
+ i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs;
+ i++) {
+ struct dbg_idle_chk_result_reg_hdr *reg_hdr;
+ bool is_mem;
+ u8 reg_id;
+
+ reg_hdr =
+ (struct dbg_idle_chk_result_reg_hdr *)dump_buf;
+ is_mem = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM);
+ reg_id = GET_FIELD(reg_hdr->data,
+ DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID);
+
+ /* Skip reg header */
+ dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr));
+
+ /* Skip register names until the required reg_id is
+ * reached.
+ */
+ for (; reg_id > curr_reg_id; curr_reg_id++)
+ parsing_str += strlen(parsing_str) + 1;
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), " %s",
+ parsing_str);
+ if (i < hdr->num_dumped_cond_regs && is_mem)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "[%d]", hdr->mem_entry_id +
+ reg_hdr->start_entry);
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset), "=");
+ for (j = 0; j < reg_hdr->size; j++, dump_buf++) {
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "0x%x", *dump_buf);
+ if (j < reg_hdr->size - 1)
+ results_offset +=
+ sprintf(qed_get_buf_ptr
+ (results_buf,
+ results_offset), ",");
+ }
+ }
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n");
+ }
+
+ /* Check if end of dump buffer was exceeded */
+ if (dump_buf > dump_buf_end)
+ return 0;
+
+ return results_offset;
+}
+
+/* Parses an idle check dump buffer.
+ * If results_buf is not NULL, the idle check results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ u32 *num_errors,
+ u32 *num_warnings)
+{
+ u32 num_section_params = 0, num_rules, num_rules_not_dumped;
+ const char *section_name, *param_name, *param_str_val;
+ u32 *dump_buf_end = dump_buf + num_dumped_dwords;
+
+ /* Offset in results_buf in bytes */
+ u32 results_offset = 0;
+
+ *parsed_results_bytes = 0;
+ *num_errors = 0;
+ *num_warnings = 0;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read idle_chk section
+ * There may be 1 or 2 idle_chk section parameters:
+ * - 1st is "num_rules"
+ * - 2nd is "num_rules_not_dumped" (optional)
+ */
+
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "idle_chk") ||
+ (num_section_params != 2 && num_section_params != 1))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &num_rules);
+ if (strcmp(param_name, "num_rules"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ if (num_section_params > 1) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &num_rules_not_dumped);
+ if (strcmp(param_name, "num_rules_not_dumped"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ } else {
+ num_rules_not_dumped = 0;
+ }
+
+ if (num_rules) {
+ u32 rules_print_size;
+
+ /* Print FW output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "FW_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
+ dump_buf_end,
+ num_rules,
+ true,
+ results_buf ?
+ results_buf +
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
+ results_offset += rules_print_size;
+ if (!rules_print_size)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+
+ /* Print LSI output */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nLSI_IDLE_CHECK:\n");
+ rules_print_size =
+ qed_parse_idle_chk_dump_rules(p_hwfn,
+ dump_buf,
+ dump_buf_end,
+ num_rules,
+ false,
+ results_buf ?
+ results_buf +
+ results_offset :
+ NULL,
+ num_errors,
+ num_warnings);
+ results_offset += rules_print_size;
+ if (!rules_print_size)
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ }
+
+ /* Print errors/warnings count */
+ if (*num_errors)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check failed!!! (with %d errors and %d warnings)\n",
+ *num_errors, *num_warnings);
+ else if (*num_warnings)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check completed successfully (with %d warnings)\n",
+ *num_warnings);
+ else
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check completed successfully\n");
+
+ if (num_rules_not_dumped)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
+ num_rules_not_dumped);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
+
+/* Allocates and fills MCP Trace meta data based on the specified meta data
+ * dump buffer.
+ * Returns debug status code.
+ */
+static enum dbg_status
+qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf)
+{
+ struct dbg_tools_user_data *dev_user_data;
+ u32 offset = 0, signature, i;
+ struct mcp_trace_meta *meta;
+ u8 *meta_buf_bytes;
+
+ dev_user_data = qed_dbg_get_user_data(p_hwfn);
+ meta = &dev_user_data->mcp_trace_meta;
+ meta_buf_bytes = (u8 *)meta_buf;
+
+ /* Free the previous meta before loading a new one. */
+ if (meta->is_allocated)
+ qed_mcp_trace_free_meta_data(p_hwfn);
+
+ memset(meta, 0, sizeof(*meta));
+
+ /* Read first signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != NVM_MAGIC_VALUE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read no. of modules and allocate memory for their pointers */
+ meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+ meta->modules = kcalloc(meta->modules_num, sizeof(char *),
+ GFP_KERNEL);
+ if (!meta->modules)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all module strings */
+ for (i = 0; i < meta->modules_num; i++) {
+ u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset);
+
+ *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL);
+ if (!(*(meta->modules + i))) {
+ /* Update number of modules to be released */
+ meta->modules_num = i ? i - 1 : 0;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes, &offset, module_len,
+ *(meta->modules + i));
+ if (module_len > MCP_TRACE_MAX_MODULE_LEN)
+ (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0';
+ }
+
+ /* Read second signature */
+ signature = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ if (signature != NVM_MAGIC_VALUE)
+ return DBG_STATUS_INVALID_TRACE_SIGNATURE;
+
+ /* Read number of formats and allocate memory for all formats */
+ meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset);
+ meta->formats = kcalloc(meta->formats_num,
+ sizeof(struct mcp_trace_format),
+ GFP_KERNEL);
+ if (!meta->formats)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ /* Allocate and read all strings */
+ for (i = 0; i < meta->formats_num; i++) {
+ struct mcp_trace_format *format_ptr = &meta->formats[i];
+ u8 format_len;
+
+ format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes,
+ &offset);
+ format_len = GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEN);
+ format_ptr->format_str = kzalloc(format_len, GFP_KERNEL);
+ if (!format_ptr->format_str) {
+ /* Update number of formats to be released */
+ meta->formats_num = i ? i - 1 : 0;
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ qed_read_str_from_buf(meta_buf_bytes,
+ &offset,
+ format_len, format_ptr->format_str);
+ }
+
+ meta->is_allocated = true;
+ return DBG_STATUS_OK;
+}
+
+/* Parses an MCP trace buffer. If parsed_buf is not NULL, the MCP Trace results
+ * are printed to it. The parsing status is returned.
+ * Arguments:
+ * trace_buf - MCP trace cyclic buffer
+ * trace_buf_size - MCP trace cyclic buffer size in bytes
+ * data_offset - offset in bytes of the data to parse in the MCP trace cyclic
+ * buffer.
+ * data_size - size in bytes of data to parse.
+ * parsed_buf - destination buffer for parsed data.
+ * parsed_results_bytes - size of parsed data in bytes.
+ */
+static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn,
+ u8 *trace_buf,
+ u32 trace_buf_size,
+ u32 data_offset,
+ u32 data_size,
+ char *parsed_buf,
+ u32 *parsed_results_bytes)
+{
+ struct dbg_tools_user_data *dev_user_data;
+ struct mcp_trace_meta *meta;
+ u32 param_mask, param_shift;
+ enum dbg_status status;
+
+ dev_user_data = qed_dbg_get_user_data(p_hwfn);
+ meta = &dev_user_data->mcp_trace_meta;
+ *parsed_results_bytes = 0;
+
+ if (!meta->is_allocated)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ status = DBG_STATUS_OK;
+
+ while (data_size) {
+ struct mcp_trace_format *format_ptr;
+ u8 format_level, format_module;
+ u32 params[3] = { 0, 0, 0 };
+ u32 header, format_idx, i;
+
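+ /* Each trace entry starts with an MFW_TRACE_ENTRY_SIZE-byte header
+ * holding the event (format) index, followed by up to
+ * MCP_TRACE_FORMAT_MAX_PARAMS parameters of 1, 2 or 4 bytes each, as
+ * specified by the matching meta-data format word.
+ */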
+ if (data_size < MFW_TRACE_ENTRY_SIZE)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ header = qed_read_from_cyclic_buf(trace_buf,
+ &data_offset,
+ trace_buf_size,
+ MFW_TRACE_ENTRY_SIZE);
+ data_size -= MFW_TRACE_ENTRY_SIZE;
+ format_idx = header & MFW_TRACE_EVENTID_MASK;
+
+ /* Skip message if its index doesn't exist in the meta data */
+ if (format_idx >= meta->formats_num) {
+ u8 format_size = (u8)GET_MFW_FIELD(header,
+ MFW_TRACE_PRM_SIZE);
+
+ if (data_size < format_size)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ data_offset = qed_cyclic_add(data_offset,
+ format_size,
+ trace_buf_size);
+ data_size -= format_size;
+ continue;
+ }
+
+ format_ptr = &meta->formats[format_idx];
+
+ for (i = 0,
+ param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift =
+ MCP_TRACE_FORMAT_P1_SIZE_OFFSET;
+ i < MCP_TRACE_FORMAT_MAX_PARAMS;
+ i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH,
+ param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) {
+ /* Extract param size (0..3) */
+ u8 param_size = (u8)((format_ptr->data & param_mask) >>
+ param_shift);
+
+ /* If the param size is zero, there are no other
+ * parameters.
+ */
+ if (!param_size)
+ break;
+
+ /* Size is encoded using 2 bits, where 3 is used to
+ * encode 4.
+ */
+ if (param_size == 3)
+ param_size = 4;
+
+ if (data_size < param_size)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ params[i] = qed_read_from_cyclic_buf(trace_buf,
+ &data_offset,
+ trace_buf_size,
+ param_size);
+ data_size -= param_size;
+ }
+
+ format_level = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_LEVEL);
+ format_module = (u8)GET_MFW_FIELD(format_ptr->data,
+ MCP_TRACE_FORMAT_MODULE);
+ if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print current message to results buffer */
+ *parsed_results_bytes +=
+ sprintf(qed_get_buf_ptr(parsed_buf,
+ *parsed_results_bytes),
+ "%s %-8s: ",
+ s_mcp_trace_level_str[format_level],
+ meta->modules[format_module]);
+ *parsed_results_bytes +=
+ sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes),
+ format_ptr->format_str,
+ params[0], params[1], params[2]);
+ }
+
+ /* Add string NULL terminator */
+ (*parsed_results_bytes)++;
+
+ return status;
+}
+
+/* Parses an MCP Trace dump buffer.
+ * If results_buf is not NULL, the MCP Trace results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf,
+ u32 *parsed_results_bytes,
+ bool free_meta_data)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 data_size, trace_data_dwords, trace_meta_dwords;
+ u32 offset, results_offset, results_buf_bytes;
+ u32 param_num_val, num_section_params;
+ struct mcp_trace *trace;
+ enum dbg_status status;
+ const u32 *meta_buf;
+ u8 *trace_buf;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read trace_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_data_dwords = param_num_val;
+
+ /* Prepare trace info */
+ trace = (struct mcp_trace *)dump_buf;
+ if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size)
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+
+ trace_buf = (u8 *)dump_buf + sizeof(*trace);
+ offset = trace->trace_oldest;
+ data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size);
+ dump_buf += trace_data_dwords;
+
+ /* Read meta_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "mcp_trace_meta"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_MCP_TRACE_BAD_DATA;
+ trace_meta_dwords = param_num_val;
+
+ /* Choose meta data buffer */
+ if (!trace_meta_dwords) {
+ /* Dump doesn't include meta data */
+ struct dbg_tools_user_data *dev_user_data =
+ qed_dbg_get_user_data(p_hwfn);
+
+ if (!dev_user_data->mcp_trace_user_meta_buf)
+ return DBG_STATUS_MCP_TRACE_NO_META;
+
+ meta_buf = dev_user_data->mcp_trace_user_meta_buf;
+ } else {
+ /* Dump includes meta data */
+ meta_buf = dump_buf;
+ }
+
+ /* Allocate meta data memory */
+ status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ status = qed_parse_mcp_trace_buf(p_hwfn,
+ trace_buf,
+ trace->size,
+ offset,
+ data_size,
+ results_buf ?
+ results_buf + results_offset :
+ NULL,
+ &results_buf_bytes);
+ if (status != DBG_STATUS_OK)
+ return status;
+
+ if (free_meta_data)
+ qed_mcp_trace_free_meta_data(p_hwfn);
+
+ *parsed_results_bytes = results_offset + results_buf_bytes;
+
+ return DBG_STATUS_OK;
+}
+
+/* Parses a Reg FIFO dump buffer.
+ * If results_buf is not NULL, the Reg FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
+ struct reg_fifo_element *elements;
+ u8 i, j, err_code, vf_val;
+ u32 results_offset = 0;
+ char vf_str[4];
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read reg_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "reg_fifo_data"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ if (param_num_val % REG_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_REG_FIFO_BAD_DATA;
+ num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS;
+ elements = (struct reg_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ const char *err_msg = NULL;
+
+ /* Discover if element belongs to a VF or a PF */
+ vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF);
+ if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL)
+ sprintf(vf_str, "%s", "N/A");
+ else
+ sprintf(vf_str, "%d", vf_val);
+
+ /* Find error message */
+ err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR);
+ for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++)
+ if (err_code == s_reg_fifo_errors[j].err_code)
+ err_msg = s_reg_fifo_errors[j].err_msg;
+
+ /* Add parsed element to parsed buffer */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n",
+ elements[i].data,
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ADDRESS) *
+ REG_FIFO_ELEMENT_ADDR_FACTOR,
+ s_access_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_ACCESS)],
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PF),
+ vf_str,
+ (u32)GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PORT),
+ s_privilege_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PRIVILEGE)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_PROTECTION)],
+ s_master_strs[GET_FIELD(elements[i].data,
+ REG_FIFO_ELEMENT_MASTER)],
+ err_msg ? err_msg : "unknown error code");
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element
+ *element,
+ char *results_buf,
+ u32 *results_offset)
+{
+ const struct igu_fifo_addr_data *found_addr = NULL;
+ u8 source, err_type, i, is_cleanup;
+ char parsed_addr_data[32];
+ char parsed_wr_data[256];
+ u32 wr_data, prod_cons;
+ bool is_wr_cmd, is_pf;
+ u16 cmd_addr;
+ u64 dword12;
+
+ /* Dword12 (dword index 1 and 2) contains bits 32..95 of the
+ * FIFO element.
+ */
+ dword12 = ((u64)element->dword2 << 32) | element->dword1;
+ is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD);
+ is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF);
+ cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR);
+ source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE);
+ err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE);
+
+ if (source >= ARRAY_SIZE(s_igu_fifo_source_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Find address data */
+ for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) {
+ const struct igu_fifo_addr_data *curr_addr =
+ &s_igu_fifo_addr_data[i];
+
+ if (cmd_addr >= curr_addr->start_addr && cmd_addr <=
+ curr_addr->end_addr)
+ found_addr = curr_addr;
+ }
+
+ if (!found_addr)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Prepare parsed address data */
+ switch (found_addr->type) {
+ case IGU_ADDR_TYPE_MSIX_MEM:
+ sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2);
+ break;
+ case IGU_ADDR_TYPE_WRITE_INT_ACK:
+ case IGU_ADDR_TYPE_WRITE_PROD_UPDATE:
+ sprintf(parsed_addr_data,
+ " SB = 0x%x", cmd_addr - found_addr->start_addr);
+ break;
+ default:
+ parsed_addr_data[0] = '\0';
+ }
+
+ if (!is_wr_cmd) {
+ parsed_wr_data[0] = '\0';
+ goto out;
+ }
+
+ /* Prepare parsed write data */
+ wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA);
+ prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS);
+ is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE);
+
+ if (source == IGU_SRC_ATTN) {
+ sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons);
+ } else {
+ if (is_cleanup) {
+ u8 cleanup_val, cleanup_type;
+
+ cleanup_val =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL);
+ cleanup_type =
+ GET_FIELD(wr_data,
+ IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ",
+ cleanup_val ? "set" : "clear",
+ cleanup_type);
+ } else {
+ u8 update_flag, en_dis_int_for_sb, segment;
+ u8 timer_mask;
+
+ update_flag = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_UPDATE_FLAG);
+ en_dis_int_for_sb =
+ GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB);
+ segment = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_SEGMENT);
+ timer_mask = GET_FIELD(wr_data,
+ IGU_FIFO_WR_DATA_TIMER_MASK);
+
+ sprintf(parsed_wr_data,
+ "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ",
+ prod_cons,
+ update_flag ? "update" : "nop",
+ en_dis_int_for_sb ?
+ (en_dis_int_for_sb == 1 ? "disable" : "nop") :
+ "enable",
+ segment ? "attn" : "regular",
+ timer_mask);
+ }
+ }
+out:
+ /* Add parsed element to parsed buffer */
+ *results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ *results_offset),
+ "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n",
+ element->dword2, element->dword1,
+ element->dword0,
+ is_pf ? "pf" : "vf",
+ GET_FIELD(element->dword0,
+ IGU_FIFO_ELEMENT_DWORD0_FID),
+ s_igu_fifo_source_strs[source],
+ is_wr_cmd ? "wr" : "rd",
+ cmd_addr,
+ (!is_pf && found_addr->vf_desc)
+ ? found_addr->vf_desc
+ : found_addr->desc,
+ parsed_addr_data,
+ parsed_wr_data,
+ s_igu_fifo_error_strs[err_type]);
+
+ return DBG_STATUS_OK;
+}
+
+/* Parses an IGU FIFO dump buffer.
+ * If results_buf is not NULL, the IGU FIFO results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
+ struct igu_fifo_element *elements;
+ enum dbg_status status;
+ u32 results_offset = 0;
+ u8 i;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read igu_fifo_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "igu_fifo_data"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ if (param_num_val % IGU_FIFO_ELEMENT_DWORDS)
+ return DBG_STATUS_IGU_FIFO_BAD_DATA;
+ num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS;
+ elements = (struct igu_fifo_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ status = qed_parse_igu_fifo_element(&elements[i],
+ results_buf,
+ &results_offset);
+ if (status != DBG_STATUS_OK)
+ return status;
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "fifo contained %d elements", num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
+
+static enum dbg_status
+qed_parse_protection_override_dump(u32 *dump_buf,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ const char *section_name, *param_name, *param_str_val;
+ u32 param_num_val, num_section_params, num_elements;
+ struct protection_override_element *elements;
+ u32 results_offset = 0;
+ u8 i;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ /* Read protection_override_data section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "protection_override_data"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ dump_buf += qed_read_param(dump_buf,
+ &param_name, &param_str_val, &param_num_val);
+ if (strcmp(param_name, "size"))
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS)
+ return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA;
+ num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS;
+ elements = (struct protection_override_element *)dump_buf;
+
+ /* Decode elements */
+ for (i = 0; i < num_elements; i++) {
+ u32 address = GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_ADDRESS) *
+ PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR;
+
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n",
+ i, address,
+ (u32)GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE),
+ (u32)GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ),
+ (u32)GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE),
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)],
+ s_protection_strs[GET_FIELD(elements[i].data,
+ PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]);
+ }
+
+ results_offset += sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "protection override contained %d elements",
+ num_elements);
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
+
+/* Parses a FW Asserts dump buffer.
+ * If results_buf is not NULL, the FW Asserts results are printed to it.
+ * In any case, the required results buffer size is assigned to
+ * parsed_results_bytes.
+ * The parsing status is returned.
+ */
+static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf,
+ char *results_buf,
+ u32 *parsed_results_bytes)
+{
+ u32 num_section_params, param_num_val, i, results_offset = 0;
+ const char *param_name, *param_str_val, *section_name;
+ bool last_section_found = false;
+
+ *parsed_results_bytes = 0;
+
+ /* Read global_params section */
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name, &num_section_params);
+ if (strcmp(section_name, "global_params"))
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print global params */
+ dump_buf += qed_print_section_params(dump_buf,
+ num_section_params,
+ results_buf, &results_offset);
+
+ while (!last_section_found) {
+ dump_buf += qed_read_section_hdr(dump_buf,
+ &section_name,
+ &num_section_params);
+ if (!strcmp(section_name, "fw_asserts")) {
+ /* Extract params */
+ const char *storm_letter = NULL;
+ u32 storm_dump_size = 0;
+
+ for (i = 0; i < num_section_params; i++) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &param_num_val);
+ if (!strcmp(param_name, "storm"))
+ storm_letter = param_str_val;
+ else if (!strcmp(param_name, "size"))
+ storm_dump_size = param_num_val;
+ else
+ return
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+
+ if (!storm_letter || !storm_dump_size)
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+
+ /* Print data */
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\n%sSTORM_ASSERT: size=%d\n",
+ storm_letter, storm_dump_size);
+ for (i = 0; i < storm_dump_size; i++, dump_buf++)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "%08x\n", *dump_buf);
+ } else if (!strcmp(section_name, "last")) {
+ last_section_found = true;
+ } else {
+ return DBG_STATUS_FW_ASSERTS_PARSE_FAILED;
+ }
+ }
+
+ /* Add 1 for string NULL termination */
+ *parsed_results_bytes = results_offset + 1;
+
+ return DBG_STATUS_OK;
+}
+
+/***************************** Public Functions *******************************/
+
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr)
+{
+ struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr;
+ u8 buf_id;
+
+ /* Convert binary data to debug arrays */
+ for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++)
+ qed_set_dbg_bin_buf(p_hwfn,
+ (enum bin_dbg_buffer_type)buf_id,
+ (u32 *)(bin_ptr + buf_hdrs[buf_id].offset),
+ buf_hdrs[buf_id].length);
+
+ return DBG_STATUS_OK;
+}
+
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr)
+{
+ *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data),
+ GFP_KERNEL);
+ if (!(*user_data_ptr))
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ return DBG_STATUS_OK;
+}
+
+const char *qed_dbg_get_status_str(enum dbg_status status)
+{
+ return (status <
+ MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status";
+}
+
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ u32 num_errors, num_warnings;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ NULL,
+ results_buf_size,
+ &num_errors, &num_warnings);
+}
+
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_idle_chk_dump(p_hwfn,
+ dump_buf,
+ num_dumped_dwords,
+ results_buf,
+ &parsed_buf_size,
+ num_errors, num_warnings);
+}
+
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf)
+{
+ struct dbg_tools_user_data *dev_user_data =
+ qed_dbg_get_user_data(p_hwfn);
+
+ dev_user_data->mcp_trace_user_meta_buf = meta_buf;
+}
+
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf, NULL, results_buf_size, true);
+}
+
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+	/* Doesn't do anything, needed for compile-time asserts */
+ qed_user_static_asserts();
+
+ return qed_parse_mcp_trace_dump(p_hwfn,
+ dump_buf,
+ results_buf, &parsed_buf_size, true);
+}
+
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf,
+ &parsed_buf_size, false);
+}
+
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf)
+{
+ u32 parsed_results_bytes;
+
+ return qed_parse_mcp_trace_buf(p_hwfn,
+ dump_buf,
+ num_dumped_bytes,
+ 0,
+ num_dumped_bytes,
+ results_buf, &parsed_results_bytes);
+}
+
+/* Frees the specified MCP Trace meta data */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn)
+{
+ struct dbg_tools_user_data *dev_user_data;
+ struct mcp_trace_meta *meta;
+ u32 i;
+
+ dev_user_data = qed_dbg_get_user_data(p_hwfn);
+ meta = &dev_user_data->mcp_trace_meta;
+ if (!meta->is_allocated)
+ return;
+
+ /* Release modules */
+ if (meta->modules) {
+ for (i = 0; i < meta->modules_num; i++)
+ kfree(meta->modules[i]);
+ kfree(meta->modules);
+ }
+
+ /* Release formats */
+ if (meta->formats) {
+ for (i = 0; i < meta->formats_num; i++)
+ kfree(meta->formats[i].format_str);
+ kfree(meta->formats);
+ }
+
+ meta->is_allocated = false;
+}
+
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size);
+}
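+
+/* Illustrative usage sketch (hypothetical, not part of this file): the IGU
+ * FIFO helpers above follow the same two-pass contract as the other parsers -
+ * query the required text size, allocate, then print. Assuming dump_buf and
+ * num_dumped_dwords were obtained from qed_dbg_igu_fifo_dump():
+ *
+ *	u32 size;
+ *	char *text;
+ *
+ *	if (qed_get_igu_fifo_results_buf_size(p_hwfn, dump_buf,
+ *					      num_dumped_dwords,
+ *					      &size) != DBG_STATUS_OK)
+ *		return;
+ *	text = vzalloc(size);
+ *	if (!text)
+ *		return;
+ *	if (qed_print_igu_fifo_results(p_hwfn, dump_buf, num_dumped_dwords,
+ *				       text) == DBG_STATUS_OK)
+ *		pr_info("%s\n", text);
+ *	vfree(text);
+ */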
+
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_protection_override_dump(dump_buf,
+ NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_protection_override_dump(dump_buf,
+ results_buf,
+ &parsed_buf_size);
+}
+
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size)
+{
+ return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size);
+}
+
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+ u32 parsed_buf_size;
+
+ return qed_parse_fw_asserts_dump(dump_buf,
+ results_buf, &parsed_buf_size);
+}
+
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results)
+{
+ const u32 *block_attn_name_offsets;
+ const char *attn_name_base;
+ const char *block_name;
+ enum dbg_attn_type attn_type;
+ u8 num_regs, i, j;
+
+ num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS);
+ attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE);
+ block_name = qed_dbg_get_block_name(p_hwfn, results->block_id);
+ if (!block_name)
+ return DBG_STATUS_INVALID_ARGS;
+
+ if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr ||
+ !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr)
+ return DBG_STATUS_DBG_ARRAY_NOT_SET;
+
+ block_attn_name_offsets =
+ (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr +
+ results->names_offset;
+
+ attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr;
+
+ /* Go over registers with a non-zero attention status */
+ for (i = 0; i < num_regs; i++) {
+ struct dbg_attn_bit_mapping *bit_mapping;
+ struct dbg_attn_reg_result *reg_result;
+ u8 num_reg_attn, bit_idx = 0;
+
+ reg_result = &results->reg_results[i];
+ num_reg_attn = GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_NUM_REG_ATTN);
+ bit_mapping = (struct dbg_attn_bit_mapping *)
+ p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr +
+ reg_result->block_attn_offset;
+
+ /* Go over attention status bits */
+ for (j = 0; j < num_reg_attn; j++) {
+ u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
+ DBG_ATTN_BIT_MAPPING_VAL);
+ const char *attn_name, *attn_type_str, *masked_str;
+ u32 attn_name_offset;
+ u32 sts_addr;
+
+ /* Check if bit mask should be advanced (due to unused
+ * bits).
+ */
+ if (GET_FIELD(bit_mapping[j].data,
+ DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) {
+ bit_idx += (u8)attn_idx_val;
+ continue;
+ }
+
+ /* Check current bit index */
+ if (reg_result->sts_val & BIT(bit_idx)) {
+				/* An attention bit with value=1 was found.
+				 * Find the attention name.
+				 */
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
+ masked_str = reg_result->mask_val &
+ BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr =
+ GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr * 4, bit_idx, masked_str);
+ }
+
+ bit_idx++;
+ }
+ }
+
+ return DBG_STATUS_OK;
+}
+
+/* Wrapper for unifying the idle_chk and mcp_trace api */
+static enum dbg_status
+qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf)
+{
+	u32 num_errors, num_warnings;
+
+ return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ results_buf, &num_errors,
+					  &num_warnings);
+}
+
+static DEFINE_MUTEX(qed_dbg_lock);
+
+#define MAX_PHY_RESULT_BUFFER 9000
+
+/******************************** Feature Meta data section ******************/
+
+#define GRC_NUM_STR_FUNCS 2
+#define IDLE_CHK_NUM_STR_FUNCS 1
+#define MCP_TRACE_NUM_STR_FUNCS 1
+#define REG_FIFO_NUM_STR_FUNCS 1
+#define IGU_FIFO_NUM_STR_FUNCS 1
+#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
+#define FW_ASSERTS_NUM_STR_FUNCS 1
+#define ILT_NUM_STR_FUNCS 1
+#define PHY_NUM_STR_FUNCS 20
+
+/* Feature meta data lookup table */
+static struct {
+ char *name;
+ u32 num_funcs;
+ enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *size);
+ enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *dump_buf,
+ u32 buf_size, u32 *dumped_dwords);
+ enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf, u32 num_dumped_dwords,
+ char *results_buf);
+ enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+ const struct qed_func_lookup *hsi_func_lookup;
+} qed_features_lookup[] = {
+ {
+ "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL, NULL}, {
+ "idle_chk", IDLE_CHK_NUM_STR_FUNCS,
+ qed_dbg_idle_chk_get_dump_buf_size,
+ qed_dbg_idle_chk_dump,
+ qed_print_idle_chk_results_wrapper,
+ qed_get_idle_chk_results_buf_size,
+ NULL}, {
+ "mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
+ qed_dbg_mcp_trace_get_dump_buf_size,
+ qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
+ qed_get_mcp_trace_results_buf_size,
+ NULL}, {
+ "reg_fifo", REG_FIFO_NUM_STR_FUNCS,
+ qed_dbg_reg_fifo_get_dump_buf_size,
+ qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
+ qed_get_reg_fifo_results_buf_size,
+ NULL}, {
+ "igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
+ qed_dbg_igu_fifo_get_dump_buf_size,
+ qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
+ qed_get_igu_fifo_results_buf_size,
+ NULL}, {
+ "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
+ qed_dbg_protection_override_get_dump_buf_size,
+ qed_dbg_protection_override_dump,
+ qed_print_protection_override_results,
+ qed_get_protection_override_results_buf_size,
+ NULL}, {
+ "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
+ qed_dbg_fw_asserts_get_dump_buf_size,
+ qed_dbg_fw_asserts_dump,
+ qed_print_fw_asserts_results,
+ qed_get_fw_asserts_results_buf_size,
+ NULL}, {
+ "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL, NULL},};
+
+static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
+{
+ u32 i, precision = 80;
+
+ if (!p_text_buf)
+ return;
+
+ pr_notice("\n%.*s", precision, p_text_buf);
+ for (i = precision; i < text_size; i += precision)
+ pr_cont("%.*s", precision, p_text_buf + i);
+ pr_cont("\n");
+}
+
+#define QED_RESULTS_BUF_MIN_SIZE 16
+/* Generic function for decoding debug feature info */
+static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_features[feature_idx];
+ u32 txt_size_bytes, null_char_pos, i;
+ u32 *dbuf, dwords;
+ enum dbg_status rc;
+ char *text_buf;
+
+ /* Check if feature supports formatting capability */
+ if (!qed_features_lookup[feature_idx].results_buf_size)
+ return DBG_STATUS_OK;
+
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = feature->dumped_dwords;
+
+ /* Obtain size of formatted output */
+ rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
+ dbuf,
+ dwords,
+ &txt_size_bytes);
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Make sure that the allocated size is a multiple of dword
+ * (4 bytes).
+ */
+ null_char_pos = txt_size_bytes - 1;
+ txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
+
+ if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "formatted size of feature was too small %d. Aborting\n",
+ txt_size_bytes);
+ return DBG_STATUS_INVALID_ARGS;
+ }
+
+ /* allocate temp text buf */
+ text_buf = vzalloc(txt_size_bytes);
+ if (!text_buf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "failed to allocate text buffer. Aborting\n");
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
+
+ /* Decode feature opcodes to string on temp buf */
+ rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
+ dbuf,
+ dwords,
+ text_buf);
+ if (rc != DBG_STATUS_OK) {
+ vfree(text_buf);
+ return rc;
+ }
+
+ /* Replace the original null character with a '\n' character.
+ * The bytes that were added as a result of the dword alignment are also
+ * padded with '\n' characters.
+ */
+ for (i = null_char_pos; i < txt_size_bytes; i++)
+ text_buf[i] = '\n';
+
+ /* Dump printable feature to log */
+ if (p_hwfn->cdev->print_dbg_data)
+ qed_dbg_print_feature(text_buf, txt_size_bytes);
+
+ /* Dump binary data as is to the output file */
+ if (p_hwfn->cdev->dbg_bin_dump) {
+ vfree(text_buf);
+ return rc;
+ }
+
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
+ * and formatted text buffer.
+ */
+ vfree(feature->dump_buf);
+ feature->dump_buf = text_buf;
+ feature->buf_size = txt_size_bytes;
+ feature->dumped_dwords = txt_size_bytes / 4;
+
+ return rc;
+}
+
+#define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF
+
+/* Generic function for performing the dump of a debug feature. */
+static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_dbg_features feature_idx)
+{
+ struct qed_dbg_feature *feature =
+ &p_hwfn->cdev->dbg_features[feature_idx];
+ u32 buf_size_dwords, *dbuf, *dwords;
+ enum dbg_status rc;
+
+ DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
+ qed_features_lookup[feature_idx].name);
+
+	/* If dump_buf was already allocated, it must be freed (this can happen
+	 * if a dump was performed but the file was never read).
+	 * We can't reuse the buffer as is, since its size may have changed.
+	 */
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+
+ /* Get buffer size from hsi, allocate accordingly, and perform the
+ * dump.
+ */
+ rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return rc;
+
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) {
+ feature->buf_size = 0;
+ DP_NOTICE(p_hwfn->cdev,
+ "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n",
+ qed_features_lookup[feature_idx].name,
+ buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS);
+
+ return DBG_STATUS_OK;
+ }
+
+ feature->buf_size = buf_size_dwords * sizeof(u32);
+ feature->dump_buf = vmalloc(feature->buf_size);
+ if (!feature->dump_buf)
+ return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = &feature->dumped_dwords;
+ rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
+ dbuf,
+ feature->buf_size /
+ sizeof(u32),
+ dwords);
+
+	/* If the MCP is stuck, we get a DBG_STATUS_NVRAM_GET_IMAGE_FAILED
+	 * error. In this case the buffer holds valid binary data, but we
+	 * won't be able to parse it (since parsing relies on data in NVRAM
+	 * which is only accessible when the MFW is responsive). Skip the
+	 * formatting but return success so that the binary data is provided.
+	 */
+ if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED)
+ return DBG_STATUS_OK;
+
+ if (rc != DBG_STATUS_OK)
+ return rc;
+
+ /* Format output */
+ rc = format_feature(p_hwfn, feature_idx);
+ return rc;
+}
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes);
+}
+
+int qed_dbg_grc_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC);
+}
+
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK,
+ num_dumped_bytes);
+}
+
+int qed_dbg_idle_chk_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK);
+}
+
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO);
+}
+
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO,
+ num_dumped_bytes);
+}
+
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO);
+}
+
+static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id, u32 *length)
+{
+ struct qed_nvm_image_att image_att;
+ int rc;
+
+ *length = 0;
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
+ if (rc)
+ return rc;
+
+ *length = image_att.length;
+
+ return rc;
+}
+
+static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes,
+ enum qed_nvm_images image_id)
+{
+ struct qed_hwfn *p_hwfn =
+ &cdev->hwfns[cdev->engine_for_debug];
+ u32 len_rounded;
+ int rc;
+
+ *num_dumped_bytes = 0;
+ rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded);
+ if (rc)
+ return rc;
+
+ DP_NOTICE(p_hwfn->cdev,
+ "Collecting a debug feature [\"nvram image %d\"]\n",
+ image_id);
+
+ len_rounded = roundup(len_rounded, sizeof(u32));
+ rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded);
+ if (rc)
+ return rc;
+
+ /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */
+ if (image_id != QED_NVM_IMAGE_NVM_META)
+ cpu_to_be32_array((__force __be32 *)buffer,
+ (const u32 *)buffer,
+ len_rounded / sizeof(u32));
+
+ *num_dumped_bytes = len_rounded;
+
+ return rc;
+}
+
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_protection_override_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE);
+}
+
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS,
+ num_dumped_bytes);
+}
+
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS);
+}
+
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes);
+}
+
+int qed_dbg_ilt_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT);
+}
+
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes)
+{
+ return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE,
+ num_dumped_bytes);
+}
+
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
+{
+ return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE);
+}
+
+/* Defines the number of bytes allocated for recording the length of a
+ * debugfs feature buffer.
+ */
+#define REGDUMP_HEADER_SIZE sizeof(u32)
+#define REGDUMP_HEADER_SIZE_SHIFT 0
+#define REGDUMP_HEADER_SIZE_MASK 0xffffff
+#define REGDUMP_HEADER_FEATURE_SHIFT 24
+#define REGDUMP_HEADER_FEATURE_MASK 0x1f
+#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29
+#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1
+#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
+#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
+#define REGDUMP_HEADER_ENGINE_SHIFT 31
+#define REGDUMP_HEADER_ENGINE_MASK 0x1
+#define REGDUMP_MAX_SIZE 0x1000000
+#define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15)
+
+enum debug_print_features {
+ OLD_MODE = 0,
+ IDLE_CHK = 1,
+ GRC_DUMP = 2,
+ MCP_TRACE = 3,
+ REG_FIFO = 4,
+ PROTECTION_OVERRIDE = 5,
+ IGU_FIFO = 6,
+ PHY = 7,
+ FW_ASSERTS = 8,
+ NVM_CFG1 = 9,
+ DEFAULT_CFG = 10,
+ NVM_META = 11,
+ MDUMP = 12,
+ ILT_DUMP = 13,
+};
+
+static u32 qed_calc_regdump_header(struct qed_dev *cdev,
+ enum debug_print_features feature,
+ int engine, u32 feature_size,
+ u8 omit_engine, u8 dbg_bin_dump)
+{
+ u32 res = 0;
+
+ SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size);
+ if (res != feature_size)
+ DP_NOTICE(cdev,
+ "Feature %d is too large (size 0x%x) and will corrupt the dump\n",
+ feature, feature_size);
+
+ SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
+ SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
+ SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
+
+ return res;
+}
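+
+/* Illustrative decode sketch (hypothetical, not part of this file): a
+ * consumer of the combined dump can recover each per-feature header with the
+ * same field definitions used above; "buffer" and "offset" are assumed locals
+ * of such a consumer:
+ *
+ *	u32 hdr = *(u32 *)((u8 *)buffer + offset);
+ *	u32 size = GET_FIELD(hdr, REGDUMP_HEADER_SIZE);
+ *	u32 feature = GET_FIELD(hdr, REGDUMP_HEADER_FEATURE);
+ *	u8 engine = GET_FIELD(hdr, REGDUMP_HEADER_ENGINE);
+ *
+ * The feature payload follows the header, so the next header starts at
+ * offset + REGDUMP_HEADER_SIZE + size.
+ */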
+
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
+{
+ u8 cur_engine, omit_engine = 0, org_engine;
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
+ struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
+ int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
+ u32 offset = 0, feature_size;
+
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ grc_params[i] = dev_data->grc.param_val[i];
+
+ if (!QED_IS_CMT(cdev))
+ omit_engine = 1;
+
+ cdev->dbg_bin_dump = 1;
+ mutex_lock(&qed_dbg_lock);
+
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Collect idle_chks and grcDump for each hw function */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "obtaining idle_chk and grcdump for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+
+ /* First idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* Second idle_chk */
+ rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
+ }
+
+ /* reg_fifo dump */
+ rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, REG_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
+ }
+
+ /* igu_fifo dump */
+ rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, IGU_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
+ }
+
+ /* protection_override dump */
+ rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE,
+ &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev,
+ PROTECTION_OVERRIDE,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev,
+ "qed_dbg_protection_override failed. rc = %d\n",
+ rc);
+ }
+
+ /* fw_asserts dump */
+ rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, FW_ASSERTS,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
+ rc);
+ }
+
+ feature_size = qed_dbg_ilt_size(cdev);
+ if (!cdev->disable_ilt_dump && feature_size <
+ ILT_DUMP_MAX_SIZE) {
+ rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, ILT_DUMP,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
+ rc);
+ }
+ }
+
+		/* GRC dump - must be last, because when the MCP is stuck it
+		 * will clutter the idle_chk, reg_fifo, ... dumps.
+		 */
+ for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
+ dev_data->grc.param_val[i] = grc_params[i];
+
+ rc = qed_dbg_grc(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, GRC_DUMP,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
+ }
+ }
+
+ qed_set_debug_engine(cdev, org_engine);
+
+ /* mcp_trace */
+ rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else {
+ DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
+ }
+
+ /* nvm cfg1 */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_CFG1);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
+ rc);
+ }
+
+ /* nvm default */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_DEFAULT_CFG);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, DEFAULT_CFG,
+ cur_engine, feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ "QED_NVM_IMAGE_DEFAULT_CFG", rc);
+ }
+
+ /* nvm meta */
+ rc = qed_dbg_nvm_image(cdev,
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_META);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
+ rc);
+ }
+
+ /* nvm mdump */
+ rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_MDUMP);
+ if (!rc) {
+ *(u32 *)((u8 *)buffer + offset) =
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
+ } else if (rc != -ENOENT) {
+ DP_ERR(cdev,
+ "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
+ QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
+ }
+
+ mutex_unlock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = 0;
+
+ return 0;
+}
+
+int qed_dbg_all_data_size(struct qed_dev *cdev)
+{
+ u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
+ u8 cur_engine, org_engine;
+
+ cdev->disable_ilt_dump = false;
+ org_engine = qed_get_debug_engine(cdev);
+ for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
+ /* Engine specific */
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "calculating idle_chk and grcdump register length for current engine\n");
+ qed_set_debug_engine(cdev, cur_engine);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
+ ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
+ if (ilt_len < ILT_DUMP_MAX_SIZE) {
+ total_ilt_len += ilt_len;
+ regs_len += ilt_len;
+ }
+ }
+
+ qed_set_debug_engine(cdev, org_engine);
+
+ /* Engine common */
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+ qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len);
+ if (image_len)
+ regs_len += REGDUMP_HEADER_SIZE + image_len;
+
+ if (regs_len > REGDUMP_MAX_SIZE) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "Dump exceeds max size 0x%x, disable ILT dump\n",
+ REGDUMP_MAX_SIZE);
+ cdev->disable_ilt_dump = true;
+ regs_len -= total_ilt_len;
+ }
+
+ return regs_len;
+}
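+
+/* Illustrative usage sketch (hypothetical, not part of this file):
+ * qed_dbg_all_data_size() and qed_dbg_all_data() are meant to be used as a
+ * pair - size first, then fill:
+ *
+ *	int len = qed_dbg_all_data_size(cdev);
+ *	void *buf;
+ *
+ *	if (len <= 0)
+ *		return;
+ *	buf = vzalloc(len);
+ *	if (!buf)
+ *		return;
+ *	if (!qed_dbg_all_data(cdev, buf))
+ *		pr_info("collected %d bytes of debug data\n", len);
+ *	vfree(buf);
+ */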
+
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes)
+{
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
+ enum dbg_status dbg_rc;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ /* Acquire ptt */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EINVAL;
+
+ /* Get dump */
+ dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature);
+ if (dbg_rc != DBG_STATUS_OK) {
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n",
+ qed_dbg_get_status_str(dbg_rc));
+ *num_dumped_bytes = 0;
+ rc = -EINVAL;
+ goto out;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "copying debugfs feature to external buffer\n");
+ memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size);
+ *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords *
+ 4;
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
+{
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 buf_size_dwords;
+ enum dbg_status rc;
+
+ if (!p_ptt)
+ return -EINVAL;
+
+ rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt,
+ &buf_size_dwords);
+ if (rc != DBG_STATUS_OK)
+ buf_size_dwords = 0;
+
+ /* Feature will not be dumped if it exceeds maximum size */
+ if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS)
+ buf_size_dwords = 0;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ qed_feature->buf_size = buf_size_dwords * sizeof(u32);
+ return qed_feature->buf_size;
+}
+
+int qed_dbg_phy_size(struct qed_dev *cdev)
+{
+	/* Return the max size of the phy info, plus the phy mac_stat size
+	 * multiplied by the number of ports.
+	 */
+ return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
+}
+
+u8 qed_get_debug_engine(struct qed_dev *cdev)
+{
+ return cdev->engine_for_debug;
+}
+
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number)
+{
+ DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n",
+ engine_number);
+ cdev->engine_for_debug = engine_number;
+}
+
+void qed_dbg_pf_init(struct qed_dev *cdev)
+{
+ const u8 *dbg_values = NULL;
+ int i;
+
+ /* Sync ver with debugbus qed code */
+ qed_dbg_set_app_ver(TOOLS_VERSION);
+
+ /* Debug values are after init values.
+ * The offset is the first dword of the file.
+ */
+ dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data;
+
+ for_each_hwfn(cdev, i) {
+ qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values);
+ }
+
+ /* Set the hwfn to be 0 as default */
+ cdev->engine_for_debug = 0;
+}
+
+void qed_dbg_pf_exit(struct qed_dev *cdev)
+{
+ struct qed_dbg_feature *feature = NULL;
+ enum qed_dbg_features feature_idx;
+
+	/* Debug features' buffers may still be allocated if a debug feature
+	 * was used but its dump was never called.
+	 */
+ for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) {
+ feature = &cdev->dbg_features[feature_idx];
+ if (feature->dump_buf) {
+ vfree(feature->dump_buf);
+ feature->dump_buf = NULL;
+ }
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
new file mode 100644
index 000000000..b0d4b937c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_DEBUG_H
+#define _QED_DEBUG_H
+
+enum qed_dbg_features {
+ DBG_FEATURE_GRC,
+ DBG_FEATURE_IDLE_CHK,
+ DBG_FEATURE_MCP_TRACE,
+ DBG_FEATURE_REG_FIFO,
+ DBG_FEATURE_IGU_FIFO,
+ DBG_FEATURE_PROTECTION_OVERRIDE,
+ DBG_FEATURE_FW_ASSERTS,
+ DBG_FEATURE_ILT,
+ DBG_FEATURE_NUM
+};
+
+/* Forward Declaration */
+struct qed_dev;
+struct qed_hwfn;
+
+int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_grc_size(struct qed_dev *cdev);
+int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_idle_chk_size(struct qed_dev *cdev);
+int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_reg_fifo_size(struct qed_dev *cdev);
+int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_igu_fifo_size(struct qed_dev *cdev);
+int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_protection_override_size(struct qed_dev *cdev);
+int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
+int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
+int qed_dbg_ilt_size(struct qed_dev *cdev);
+int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
+ u32 *num_dumped_bytes);
+int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_phy_size(struct qed_dev *cdev);
+int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
+int qed_dbg_all_data_size(struct qed_dev *cdev);
+u8 qed_get_debug_engine(struct qed_dev *cdev);
+void qed_set_debug_engine(struct qed_dev *cdev, int engine_number);
+int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
+ enum qed_dbg_features feature, u32 *num_dumped_bytes);
+int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature);
+
+void qed_dbg_pf_init(struct qed_dev *cdev);
+void qed_dbg_pf_exit(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
new file mode 100644
index 000000000..86a93cac2
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -0,0 +1,5520 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_dev_api.h"
+#include "qed_fcoe.h"
+#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_ooo.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+#include "qed_rdma.h"
+#include "qed_nvmetcp.h"
+
+static DEFINE_SPINLOCK(qm_lock);
+
+/******************** Doorbell Recovery *******************/
+/* The doorbell recovery mechanism consists of a list of entries which represent
+ * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each
+ * entity needs to register with the mechanism and provide the parameters
+ * describing its doorbell, including a location where last used doorbell data
+ * can be found. The doorbell execute function will traverse the list and
+ * doorbell all of the registered entries.
+ */
+struct qed_db_recovery_entry {
+ struct list_head list_entry;
+ void __iomem *db_addr;
+ void *db_data;
+ enum qed_db_rec_width db_width;
+ enum qed_db_rec_space db_space;
+ u8 hwfn_idx;
+};
+
+/* Display a single doorbell recovery entry */
+static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
+ struct qed_db_recovery_entry *db_entry,
+ char *action)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n",
+ action,
+ db_entry,
+ db_entry->db_addr,
+ db_entry->db_data,
+ db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b",
+ db_entry->db_space == DB_REC_USER ? "user" : "kernel",
+ db_entry->hwfn_idx);
+}
+
+/* Doorbell address sanity (address within doorbell bar range) */
+static bool qed_db_rec_sanity(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ enum qed_db_rec_width db_width,
+ void *db_data)
+{
+ u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
+ /* Make sure doorbell address is within the doorbell bar */
+ if (db_addr < cdev->doorbells ||
+ (u8 __iomem *)db_addr + width >
+ (u8 __iomem *)cdev->doorbells + cdev->db_size) {
+ WARN(true,
+ "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
+ db_addr,
+ cdev->doorbells,
+ (u8 __iomem *)cdev->doorbells + cdev->db_size);
+ return false;
+ }
+
+	/* Make sure the doorbell data pointer is not NULL */
+ if (!db_data) {
+ WARN(true, "Illegal doorbell data pointer: %p", db_data);
+ return false;
+ }
+
+ return true;
+}
+
+/* Find hwfn according to the doorbell address */
+static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev,
+ void __iomem *db_addr)
+{
+ struct qed_hwfn *p_hwfn;
+
+	/* In CMT the doorbell bar is split down the middle between engine 0
+	 * and engine 1.
+	 */
+ if (cdev->num_hwfns > 1)
+ p_hwfn = db_addr < cdev->hwfns[1].doorbells ?
+ &cdev->hwfns[0] : &cdev->hwfns[1];
+ else
+ p_hwfn = QED_LEADING_HWFN(cdev);
+
+ return p_hwfn;
+}
+
+/* Add a new entry to the doorbell recovery mechanism */
+int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space)
+{
+ struct qed_db_recovery_entry *db_entry;
+ struct qed_hwfn *p_hwfn;
+
+ /* Shortcircuit VFs, for now */
+ if (IS_VF(cdev)) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return 0;
+ }
+
+ /* Sanitize doorbell address */
+ if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
+ return -EINVAL;
+
+ /* Obtain hwfn from doorbell address */
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
+
+ /* Create entry */
+ db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL);
+ if (!db_entry) {
+ DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n");
+ return -ENOMEM;
+ }
+
+ /* Populate entry */
+ db_entry->db_addr = db_addr;
+ db_entry->db_data = db_data;
+ db_entry->db_width = db_width;
+ db_entry->db_space = db_space;
+ db_entry->hwfn_idx = p_hwfn->my_id;
+
+ /* Display */
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding");
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list);
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+
+ return 0;
+}
+
+/* Remove an entry from the doorbell recovery mechanism */
+int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+ struct qed_hwfn *p_hwfn;
+ int rc = -EINVAL;
+
+ /* Shortcircuit VFs, for now */
+ if (IS_VF(cdev)) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV, "db recovery - skipping VF doorbell\n");
+ return 0;
+ }
+
+ /* Obtain hwfn from doorbell address */
+ p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+		/* Search according to db_data addr, since db_addr is not
+		 * unique (roce).
+		 */
+ if (db_entry->db_data == db_data) {
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting");
+ list_del(&db_entry->list_entry);
+ rc = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+
+	if (rc == -EINVAL)
+		DP_NOTICE(p_hwfn,
+ "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n",
+ db_data, db_addr);
+ else
+ kfree(db_entry);
+
+ return rc;
+}
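+
+/* Illustrative usage sketch (hypothetical, not part of this file): a
+ * doorbelling entity registers its doorbell address together with the
+ * location of its last-used doorbell data, and later unregisters by that
+ * data pointer (the unique key). Assuming a queue object "q" that keeps both:
+ *
+ *	rc = qed_db_recovery_add(cdev, q->db_addr, &q->db_data,
+ *				 DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ *
+ *	(normal doorbelling; qed_db_recovery_execute() may replay q->db_data)
+ *
+ *	qed_db_recovery_del(cdev, q->db_addr, &q->db_data);
+ */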
+
+/* Initialize the doorbell recovery mechanism */
+static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n");
+
+ /* Make sure db_size was set in cdev */
+ if (!p_hwfn->cdev->db_size) {
+ DP_ERR(p_hwfn->cdev, "db_size not set\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list);
+ spin_lock_init(&p_hwfn->db_recovery_info.lock);
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+
+ return 0;
+}
+
+/* Destroy the doorbell recovery mechanism */
+static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n");
+ if (!list_empty(&p_hwfn->db_recovery_info.list)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n");
+ while (!list_empty(&p_hwfn->db_recovery_info.list)) {
+ db_entry =
+ list_first_entry(&p_hwfn->db_recovery_info.list,
+ struct qed_db_recovery_entry,
+ list_entry);
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging");
+ list_del(&db_entry->list_entry);
+ kfree(db_entry);
+ }
+ }
+ p_hwfn->db_recovery_info.db_recovery_counter = 0;
+}
+
+/* Print the content of the doorbell recovery mechanism */
+void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_NOTICE(p_hwfn,
+ "Displaying doorbell recovery database. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry) {
+ qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing");
+ }
+
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+}
+
+/* Ring the doorbell of a single doorbell recovery entry */
+static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
+ struct qed_db_recovery_entry *db_entry)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %x\n",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %llx\n",
+ db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+ }
+
+ /* Sanity */
+ if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
+ db_entry->db_width, db_entry->db_data))
+ return;
+
+ /* Flush the write combined buffer. Since there are multiple doorbelling
+ * entities using the same address, if we don't flush, a transaction
+ * could be lost.
+ */
+ wmb();
+
+ /* Ring the doorbell */
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
+
+ /* Flush the write combined buffer. Next doorbell may come from a
+ * different entity to the same address...
+ */
+ wmb();
+}
+
+/* Traverse the doorbell recovery entry list and ring all the doorbells */
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
+{
+ struct qed_db_recovery_entry *db_entry = NULL;
+
+ DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
+
+ /* Track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
+
+ /* Protect the list */
+ spin_lock_bh(&p_hwfn->db_recovery_info.lock);
+ list_for_each_entry(db_entry,
+ &p_hwfn->db_recovery_info.list, list_entry)
+ qed_db_recovery_ring(p_hwfn, db_entry);
+ spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
+}
+
+/******************** Doorbell Recovery end ****************/
+
+/********************************** NIG LLH ***********************************/
+
+enum qed_llh_filter_type {
+ QED_LLH_FILTER_TYPE_MAC,
+ QED_LLH_FILTER_TYPE_PROTOCOL,
+};
+
+struct qed_llh_mac_filter {
+ u8 addr[ETH_ALEN];
+};
+
+struct qed_llh_protocol_filter {
+ enum qed_llh_prot_filter_type_t type;
+ u16 source_port_or_eth_type;
+ u16 dest_port;
+};
+
+union qed_llh_filter {
+ struct qed_llh_mac_filter mac;
+ struct qed_llh_protocol_filter protocol;
+};
+
+struct qed_llh_filter_info {
+ bool b_enabled;
+ u32 ref_cnt;
+ enum qed_llh_filter_type type;
+ union qed_llh_filter filter;
+};
+
+struct qed_llh_info {
+ /* Number of LLH filters banks */
+ u8 num_ppfid;
+
+#define MAX_NUM_PPFID 8
+ u8 ppfid_array[MAX_NUM_PPFID];
+
+ /* Array of filters arrays:
+ * "num_ppfid" elements of filters banks, where each is an array of
+ * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters.
+ */
+ struct qed_llh_filter_info **pp_filters;
+};
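+
+/* Illustrative note (hypothetical, not part of this file): with the layout
+ * above, the shadow entry of filter "filter_idx" within filter bank "ppfid"
+ * is reached as:
+ *
+ *	struct qed_llh_filter_info *p_fltr =
+ *		&cdev->p_llh_info->pp_filters[ppfid][filter_idx];
+ *
+ * which is how the shadow helpers below index it.
+ */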
+
+static void qed_llh_free(struct qed_dev *cdev)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ u32 i;
+
+ if (p_llh_info) {
+ if (p_llh_info->pp_filters)
+ for (i = 0; i < p_llh_info->num_ppfid; i++)
+ kfree(p_llh_info->pp_filters[i]);
+
+ kfree(p_llh_info->pp_filters);
+ }
+
+ kfree(p_llh_info);
+ cdev->p_llh_info = NULL;
+}
+
+static int qed_llh_alloc(struct qed_dev *cdev)
+{
+ struct qed_llh_info *p_llh_info;
+ u32 size, i;
+
+ p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL);
+ if (!p_llh_info)
+ return -ENOMEM;
+ cdev->p_llh_info = p_llh_info;
+
+ for (i = 0; i < MAX_NUM_PPFID; i++) {
+ if (!(cdev->ppfid_bitmap & (0x1 << i)))
+ continue;
+
+ p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i;
+ DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n",
+ p_llh_info->num_ppfid, i);
+ p_llh_info->num_ppfid++;
+ }
+
+ size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters);
+ p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL);
+ if (!p_llh_info->pp_filters)
+ return -ENOMEM;
+
+ size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(**p_llh_info->pp_filters);
+ for (i = 0; i < p_llh_info->num_ppfid; i++) {
+ p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL);
+ if (!p_llh_info->pp_filters[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_llh_shadow_sanity(struct qed_dev *cdev,
+ u8 ppfid, u8 filter_idx, const char *action)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(cdev,
+ "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n",
+ action, ppfid, p_llh_info->num_ppfid);
+ return -EINVAL;
+ }
+
+ if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
+ DP_NOTICE(cdev,
+ "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n",
+ action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define QED_LLH_INVALID_FILTER_IDX 0xff
+
+static int
+qed_llh_shadow_search_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ union qed_llh_filter *p_filter, u8 *p_filter_idx)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+ u8 i;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search");
+ if (rc)
+ return rc;
+
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!memcmp(p_filter, &p_filters[i].filter,
+ sizeof(*p_filter))) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+ u8 i;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx");
+ if (rc)
+ return rc;
+
+ *p_filter_idx = QED_LLH_INVALID_FILTER_IDX;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
+ if (!p_filters[i].b_enabled) {
+ *p_filter_idx = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int
+__qed_llh_shadow_add_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ u8 filter_idx,
+ enum qed_llh_filter_type type,
+ union qed_llh_filter *p_filter, u32 *p_ref_cnt)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ p_filters[filter_idx].b_enabled = true;
+ p_filters[filter_idx].type = type;
+ memcpy(&p_filters[filter_idx].filter, p_filter,
+ sizeof(p_filters[filter_idx].filter));
+ }
+
+ *p_ref_cnt = ++p_filters[filter_idx].ref_cnt;
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_add_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_filter_type type,
+ union qed_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ int rc;
+
+ /* Check if the same filter already exist */
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
+ if (rc)
+ return rc;
+
+ /* Find a new entry in case of a new filter */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx);
+ if (rc)
+ return rc;
+ }
+
+ /* No free entry was found */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(cdev,
+ "Failed to find an empty LLH filter to utilize [ppfid %d]\n",
+ ppfid);
+ return -EINVAL;
+ }
+
+ return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type,
+ p_filter, p_ref_cnt);
+}
+
+static int
+__qed_llh_shadow_remove_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 filter_idx, u32 *p_ref_cnt)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ if (!p_filters[filter_idx].ref_cnt) {
+ DP_NOTICE(cdev,
+ "LLH shadow: trying to remove a filter with ref_cnt=0\n");
+ return -EINVAL;
+ }
+
+ *p_ref_cnt = --p_filters[filter_idx].ref_cnt;
+ if (!p_filters[filter_idx].ref_cnt)
+ memset(&p_filters[filter_idx],
+ 0, sizeof(p_filters[filter_idx]));
+
+ return 0;
+}
+
+static int
+qed_llh_shadow_remove_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ union qed_llh_filter *p_filter,
+ u8 *p_filter_idx, u32 *p_ref_cnt)
+{
+ int rc;
+
+ rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx);
+ if (rc)
+ return rc;
+
+ /* No matching filter was found */
+ if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) {
+ DP_NOTICE(cdev, "Failed to find a filter in the LLH shadow\n");
+ return -EINVAL;
+ }
+
+ return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx,
+ p_ref_cnt);
+}
+
+static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+
+ if (ppfid >= p_llh_info->num_ppfid) {
+ DP_NOTICE(cdev,
+ "ppfid %d is not valid, available indices are 0..%d\n",
+ ppfid, p_llh_info->num_ppfid - 1);
+ *p_abs_ppfid = 0;
+ return -EINVAL;
+ }
+
+ *p_abs_ppfid = p_llh_info->ppfid_array[ppfid];
+
+ return 0;
+}
+
+static int
+qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ enum qed_eng eng;
+ u8 ppfid;
+ int rc;
+
+ rc = qed_mcp_get_engine_config(p_hwfn, p_ptt);
+ if (rc != 0 && rc != -EOPNOTSUPP) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the engine affinity configuration\n");
+ return rc;
+ }
+
+ /* RoCE PF is bound to a single engine */
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
+ rc = qed_llh_set_roce_affinity(cdev, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the RoCE engine affinity\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Set the engine affinity of RoCE packets as %d\n",
+ eng);
+ }
+
+ /* Storage PF is bound to a single engine while L2 PF uses both */
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) ||
+ QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0;
+ else /* L2_PERSONALITY */
+ eng = QED_BOTH_ENG;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_SP,
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return 0;
+}
+
+static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 ppfid, abs_ppfid;
+ int rc;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ u32 addr;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ return rc;
+
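+ /* Map this PF's absolute ppfids to its relative PF id
+ * (4-byte entries in the PPFID2PFID table).
+ */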
+ addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id);
+ }
+
+ if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+ !QED_IS_FCOE_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+ if (rc)
+ DP_NOTICE(cdev,
+ "Failed to add an LLH filter with the primary MAC\n");
+ }
+
+ if (QED_IS_CMT(cdev)) {
+ rc = qed_llh_set_engine_affin(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev)
+{
+ return cdev->p_llh_info->num_ppfid;
+}
+
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3
+#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2
+
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ u8 abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!QED_IS_CMT(cdev))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ switch (eng) {
+ case QED_ENG0:
+ eng_sel = 0;
+ break;
+ case QED_ENG1:
+ eng_sel = 1;
+ break;
+ case QED_BOTH_ENG:
+ eng_sel = 2;
+ break;
+ default:
+ DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ /* The iWARP affinity is set as the affinity of ppfid 0 */
+ if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn))
+ cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0;
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 addr, val, eng_sel;
+ u8 ppfid, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!QED_IS_CMT(cdev))
+ goto out;
+
+ switch (eng) {
+ case QED_ENG0:
+ eng_sel = 0;
+ break;
+ case QED_ENG1:
+ eng_sel = 1;
+ break;
+ case QED_BOTH_ENG:
+ eng_sel = 2;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL,
+ 0xf); /* QP bit 15 */
+ break;
+ default:
+ DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) {
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4;
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+struct qed_llh_filter_details {
+ u64 value;
+ u32 mode;
+ u32 protocol_type;
+ u32 hdr_sel;
+ u32 enable;
+};
+
+static int
+qed_llh_access_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 abs_ppfid,
+ u8 filter_idx,
+ struct qed_llh_filter_details *p_details)
+{
+ struct qed_dmae_params params = {0};
+ u32 addr;
+ u8 pfid;
+ int rc;
+
+ /* The NIG/LLH registers that are accessed in this function expose only
+ * 16 rows to a PF, i.e. only the 16 filters of its default ppfid.
+ * Accessing the filters of other ppfids requires pretending to be
+ * other PFs.
+ * On AH, the PPFID->PFID calculation is based on the relative index of
+ * a PF on its port.
+ * On BB, the pfid is simply the abs_ppfid.
+ */
+ if (QED_IS_BB(p_hwfn->cdev))
+ pfid = abs_ppfid;
+ else
+ pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine +
+ MFW_PORT(p_hwfn);
+
+ /* Filter enable - should be done first when removing a filter */
+ if (!p_details->enable) {
+ qed_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+ }
+
+ /* Filter value */
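+ /* Each filter value spans two dwords (64 bits), hence the 8-byte
+ * stride and the 2-dword DMAE write below.
+ */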
+ addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4;
+
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1);
+ params.dst_pfid = pfid;
+ rc = qed_dmae_host2grc(p_hwfn,
+ p_ptt,
+ (u64)(uintptr_t)&p_details->value,
+ addr, 2 /* size_in_dwords */,
+ &params);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ /* Filter mode */
+ addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->mode);
+
+ /* Filter protocol type */
+ addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type);
+
+ /* Filter header select */
+ addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel);
+
+ /* Filter enable - should be done last when adding a filter */
+ if (p_details->enable) {
+ addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4;
+ qed_wr(p_hwfn, p_ptt, addr, p_details->enable);
+ }
+
+ qed_fid_pretend(p_hwfn, p_ptt,
+ p_hwfn->rel_pf_id <<
+ PXP_PRETEND_CONCRETE_FID_PFID_SHIFT);
+
+ return 0;
+}
+
+static int
+qed_llh_add_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 abs_ppfid,
+ u8 filter_idx, u8 filter_prot_type, u32 high, u32 low)
+{
+ struct qed_llh_filter_details filter_details;
+
+ filter_details.enable = 1;
+ filter_details.value = ((u64)high << 32) | low;
+ filter_details.hdr_sel = 0;
+ filter_details.protocol_type = filter_prot_type;
+ /* Mode: 0: MAC-address classification 1: protocol classification */
+ filter_details.mode = filter_prot_type ? 1 : 0;
+
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details);
+}
+
+static int
+qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx)
+{
+ struct qed_llh_filter_details filter_details = {0};
+
+ return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ &filter_details);
+}
+
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, const u8 mac_addr[ETH_ALEN])
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ union qed_llh_filter filter = {};
+ u8 filter_idx, abs_ppfid = 0;
+ u32 high, low, ref_cnt;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ memcpy(filter.mac.addr, mac_addr, ETH_ALEN);
+ rc = qed_llh_shadow_add_filter(cdev, ppfid,
+ QED_LLH_FILTER_TYPE_MAC,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
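+ /* Pack the MAC into the 64-bit filter value, e.g. aa:bb:cc:dd:ee:ff
+ * yields high = 0x0000aabb and low = 0xccddeeff.
+ */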
+ high = mac_addr[1] | (mac_addr[0] << 8);
+ low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) |
+ (mac_addr[2] << 24);
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx,
+ 0, high, low);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n",
+ mac_addr, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static int
+qed_llh_protocol_filter_stringify(struct qed_dev *cdev,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port, u8 *str, size_t str_len)
+{
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ snprintf(str, str_len, "Ethertype 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ snprintf(str, str_len, "TCP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ snprintf(str, str_len, "UDP src port 0x%04x",
+ source_port_or_eth_type);
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ snprintf(str, str_len, "TCP dst port 0x%04x", dest_port);
+ break;
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ snprintf(str, str_len, "UDP dst port 0x%04x", dest_port);
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x",
+ source_port_or_eth_type, dest_port);
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Invalid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type,
+ u16 dest_port, u32 *p_high, u32 *p_low)
+{
+ *p_high = 0;
+ *p_low = 0;
+
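+ /* Ethertype filters use the high dword; port filters are packed into
+ * the low dword as (src_port << 16) | dst_port, e.g. TCP src/dst
+ * 0x1234/0x5678 yields low = 0x12345678.
+ */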
+ switch (type) {
+ case QED_LLH_FILTER_ETHERTYPE:
+ *p_high = source_port_or_eth_type;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_PORT:
+ case QED_LLH_FILTER_UDP_SRC_PORT:
+ *p_low = source_port_or_eth_type << 16;
+ break;
+ case QED_LLH_FILTER_TCP_DEST_PORT:
+ case QED_LLH_FILTER_UDP_DEST_PORT:
+ *p_low = dest_port;
+ break;
+ case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT:
+ case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT:
+ *p_low = (source_port_or_eth_type << 16) | dest_port;
+ break;
+ default:
+ DP_NOTICE(cdev,
+ "Invalid LLH protocol filter type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, str[32], type_bitmap;
+ union qed_llh_filter filter = {};
+ u32 high, low, ref_cnt;
+ int rc = 0;
+
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc)
+ goto err;
+
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = qed_llh_shadow_add_filter(cdev,
+ ppfid,
+ QED_LLH_FILTER_TYPE_PROTOCOL,
+ &filter, &filter_idx, &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ /* Configure the LLH only in case of a new filter */
+ if (ref_cnt == 1) {
+ rc = qed_llh_protocol_filter_to_hilo(cdev, type,
+ source_port_or_eth_type,
+ dest_port, &high, &low);
+ if (rc)
+ goto err;
+
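+ /* The protocol type is programmed as a one-hot bit in the filter's
+ * protocol-type field.
+ */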
+ type_bitmap = 0x1 << type;
+ rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx, type_bitmap, high, low);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(p_hwfn,
+ "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n",
+ str, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN])
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ union qed_llh_filter filter = {};
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+ u32 ref_cnt;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ return;
+
+ ether_addr_copy(filter.mac.addr, mac_addr);
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n",
+ mac_addr, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+void qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid, str[32];
+ union qed_llh_filter filter = {};
+ int rc = 0;
+ u32 ref_cnt;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_protocol_filter_stringify(cdev, type,
+ source_port_or_eth_type,
+ dest_port, str, sizeof(str));
+ if (rc)
+ goto err;
+
+ filter.protocol.type = type;
+ filter.protocol.source_port_or_eth_type = source_port_or_eth_type;
+ filter.protocol.dest_port = dest_port;
+ rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx,
+ &ref_cnt);
+ if (rc)
+ goto err;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto err;
+
+ /* Remove from the LLH in case the filter is not in use */
+ if (!ref_cnt) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid,
+ filter_idx);
+ if (rc)
+ goto err;
+ }
+
+ DP_VERBOSE(cdev,
+ QED_MSG_SP,
+ "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n",
+ str, ppfid, abs_ppfid, filter_idx, ref_cnt);
+
+ goto out;
+
+err: DP_NOTICE(cdev,
+ "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n",
+ str, ppfid);
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
+/******************************* NIG LLH - End ********************************/
+
+#define QED_MIN_DPIS (4)
+#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS)
+
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum BAR_ID bar_id)
+{
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_bar_size(p_hwfn, bar_id);
+
+ val = qed_rd(p_hwfn, p_ptt, bar_reg);
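+ /* A non-zero value encodes the BAR size as a power of two:
+ * size = 2^(val + 15) bytes, e.g. val == 4 -> 512kB.
+ */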
+ if (val)
+ return 1 << (val + 15);
+
+ /* Old MFWs initialized the above register only conditionally */
+ if (p_hwfn->cdev->num_hwfns > 1) {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n");
+ return bar_id == BAR_ID_0 ? 256 * 1024 : 512 * 1024;
+ } else {
+ DP_INFO(p_hwfn,
+ "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n");
+ return 512 * 1024;
+ }
+}
+
+void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level)
+{
+ u32 i;
+
+ cdev->dp_level = dp_level;
+ cdev->dp_module = dp_module;
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->dp_level = dp_level;
+ p_hwfn->dp_module = dp_module;
+ }
+}
+
+void qed_init_struct(struct qed_dev *cdev)
+{
+ u8 i;
+
+ for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->cdev = cdev;
+ p_hwfn->my_id = i;
+ p_hwfn->b_active = false;
+
+ mutex_init(&p_hwfn->dmae_info.mutex);
+ }
+
+ /* hwfn 0 is always active */
+ cdev->hwfns[0].b_active = true;
+
+ /* set the default cache alignment to 128 */
+ cdev->cache_shift = 7;
+}
+
+static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ kfree(qm_info->qm_pq_params);
+ qm_info->qm_pq_params = NULL;
+ kfree(qm_info->qm_vport_params);
+ qm_info->qm_vport_params = NULL;
+ kfree(qm_info->qm_port_params);
+ qm_info->qm_port_params = NULL;
+ kfree(qm_info->wfq_data);
+ qm_info->wfq_data = NULL;
+}
+
+static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->dbg_user_info);
+ p_hwfn->dbg_user_info = NULL;
+}
+
+void qed_resc_free(struct qed_dev *cdev)
+{
+ struct qed_rdma_info *rdma_info;
+ struct qed_hwfn *p_hwfn;
+ int i;
+
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i)
+ qed_l2_free(&cdev->hwfns[i]);
+ return;
+ }
+
+ kfree(cdev->fw_data);
+ cdev->fw_data = NULL;
+
+ kfree(cdev->reset_stats);
+ cdev->reset_stats = NULL;
+
+ qed_llh_free(cdev);
+
+ for_each_hwfn(cdev, i) {
+ p_hwfn = cdev->hwfns + i;
+ rdma_info = p_hwfn->p_rdma_info;
+
+ qed_cxt_mngr_free(p_hwfn);
+ qed_qm_info_free(p_hwfn);
+ qed_spq_free(p_hwfn);
+ qed_eq_free(p_hwfn);
+ qed_consq_free(p_hwfn);
+ qed_int_free(p_hwfn);
+#ifdef CONFIG_QED_LL2
+ qed_ll2_free(p_hwfn);
+#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ qed_fcoe_free(p_hwfn);
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ qed_iscsi_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_free(p_hwfn);
+ qed_ooo_free(p_hwfn);
+ }
+
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) {
+ qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto);
+ qed_rdma_info_free(p_hwfn);
+ }
+
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+ qed_iov_free(p_hwfn);
+ qed_l2_free(p_hwfn);
+ qed_dmae_info_free(p_hwfn);
+ qed_dcbx_info_free(p_hwfn);
+ qed_dbg_user_data_free(p_hwfn);
+ qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
+
+ /* Destroy doorbell recovery mechanism */
+ qed_db_recovery_teardown(p_hwfn);
+ }
+}
+
+/******************** QM initialization *******************/
+#define ACTIVE_TCS_BMAP 0x9f
+#define ACTIVE_TCS_BMAP_4PORT_K2 0xf
+
+/* determines the physical queue flags for a given PF. */
+static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn)
+{
+ u32 flags;
+
+ /* common flags */
+ flags = PQ_FLAGS_LB;
+
+ /* feature flags */
+ if (IS_QED_SRIOV(p_hwfn->cdev))
+ flags |= PQ_FLAGS_VFS;
+
+ /* protocol flags */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ flags |= PQ_FLAGS_MCOS;
+ break;
+ case QED_PCI_FCOE:
+ flags |= PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
+ flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD;
+ break;
+ case QED_PCI_ETH_ROCE:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT;
+ if (IS_QED_MULTI_TC_ROCE(p_hwfn))
+ flags |= PQ_FLAGS_MTC;
+ break;
+ case QED_PCI_ETH_IWARP:
+ flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO |
+ PQ_FLAGS_OFLD;
+ break;
+ default:
+ DP_ERR(p_hwfn,
+ "unknown personality %d\n", p_hwfn->hw_info.personality);
+ return 0;
+ }
+
+ return flags;
+}
+
+/* Getters for resource amounts necessary for qm initialization */
+static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.num_hw_tc;
+}
+
+static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn)
+{
+ return IS_QED_SRIOV(p_hwfn->cdev) ?
+ p_hwfn->cdev->p_iov_info->total_vfs : 0;
+}
+
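+/* Number of TCs used by the offload and low-latency pqs: a single TC
+ * unless multi-TC (PQ_FLAGS_MTC) is enabled.
+ */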
+static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+ if (!(PQ_FLAGS_MTC & pq_flags))
+ return 1;
+
+ return qed_init_qm_get_num_tcs(p_hwfn);
+}
+
+#define NUM_DEFAULT_RLS 1
+
+static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
+{
+ u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+ /* num RLs can't exceed resource amount of rls or vports */
+ num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
+
+ /* Make sure after we reserve there's something left */
+ if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
+ return 0;
+
+ /* subtract rls necessary for VFs and one default one for the PF */
+ num_pf_rls -= num_vfs + NUM_DEFAULT_RLS;
+
+ return num_pf_rls;
+}
+
+static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+ /* all pqs share the same vport, except for vfs and pf_rl pqs */
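+ /* e.g. an SR-IOV L2 PF with 8 VFs yields 8 + 1 = 9 vports
+ * (one vport shared by all non-VF pqs).
+ */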
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) *
+ qed_init_qm_get_num_vfs(p_hwfn) + 1;
+}
+
+/* calc amount of PQs according to the requested flags */
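+/* e.g. an iSCSI PF without SR-IOV (LB | ACK | OOO | OFLD) needs
+ * 1 + 1 + 1 + 1 = 4 PQs.
+ */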
+static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn)
+{
+ u32 pq_flags = qed_get_pq_flags(p_hwfn);
+
+ return (!!(PQ_FLAGS_RLS & pq_flags)) *
+ qed_init_qm_get_num_pf_rls(p_hwfn) +
+ (!!(PQ_FLAGS_MCOS & pq_flags)) *
+ qed_init_qm_get_num_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) +
+ (!!(PQ_FLAGS_ACK & pq_flags)) +
+ (!!(PQ_FLAGS_OFLD & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_LLT & pq_flags)) *
+ qed_init_qm_get_num_mtc_tcs(p_hwfn) +
+ (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn);
+}
+
+/* initialize the top level QM params */
+static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool four_port;
+
+ /* pq and vport bases for this PF */
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+
+ /* rate limiting and weighted fair queueing are always enabled */
+ qm_info->vport_rl_en = true;
+ qm_info->vport_wfq_en = true;
+
+ /* TC config is different for AH 4 port */
+ four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
+
+ /* in AH 4 port we have fewer TCs per port */
+ qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
+ NUM_OF_PHYS_TCS;
+
+ /* unless MFW indicated otherwise, ooo_tc == 3 for
+ * AH 4-port and 4 otherwise.
+ */
+ if (!qm_info->ooo_tc)
+ qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC :
+ DCBX_TCP_OOO_TC;
+}
+
+/* initialize qm vport params */
+static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 i;
+
+ /* all vports participate in weighted fair queueing */
+ for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++)
+ qm_info->qm_vport_params[i].wfq = 1;
+}
+
+/* initialize qm port params */
+static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
+{
+ /* Initialize qm port parameters */
+ u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ /* indicate how ooo and high pri traffic is dealt with */
+ active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
+ ACTIVE_TCS_BMAP_4PORT_K2 :
+ ACTIVE_TCS_BMAP;
+
+ for (i = 0; i < num_ports; i++) {
+ struct init_qm_port_params *p_qm_port =
+ &p_hwfn->qm_info.qm_port_params[i];
+ u16 pbf_max_cmd_lines;
+
+ p_qm_port->active = 1;
+ p_qm_port->active_phys_tcs = active_phys_tcs;
+ pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev);
+ p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports;
+ p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports;
+ }
+}
+
+/* Reset the params which must be reset for qm init. QM init may be called as
+ * a result of flows other than driver load (e.g. dcbx renegotiation). Other
+ * params may be affected by the init but would simply recalculate to the same
+ * values. The allocations made for QM init, ports, vports, pqs and vfqs are not
+ * affected as these amounts stay the same.
+ */
+static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_pqs = 0;
+ qm_info->num_vports = 0;
+ qm_info->num_pf_rls = 0;
+ qm_info->num_vf_pqs = 0;
+ qm_info->first_vf_pq = 0;
+ qm_info->first_mcos_pq = 0;
+ qm_info->first_rl_pq = 0;
+}
+
+static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ qm_info->num_vports++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+}
+
+/* initialize a single pq and manage qm_info resource accounting.
+ * The pq_init_flags param determines whether the PQ is rate limited
+ * (for VF or PF) and whether a new vport is allocated to the pq or not
+ * (i.e. vport will be shared).
+ */
+
+/* flags for pq init */
+#define PQ_INIT_SHARE_VPORT BIT(0)
+#define PQ_INIT_PF_RL BIT(1)
+#define PQ_INIT_VF_RL BIT(2)
+
+/* defines for pq init */
+#define PQ_INIT_DEFAULT_WRR_GROUP 1
+#define PQ_INIT_DEFAULT_TC 0
+
+void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc)
+{
+ p_info->offload_tc = tc;
+ p_info->offload_tc_set = true;
+}
+
+static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->hw_info.offload_tc_set;
+}
+
+static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn)
+{
+ if (qed_is_offload_tc_set(p_hwfn))
+ return p_hwfn->hw_info.offload_tc;
+
+ return PQ_INIT_DEFAULT_TC;
+}
+
+static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
+ struct qed_qm_info *qm_info,
+ u8 tc, u32 pq_init_flags)
+{
+ u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn);
+
+ if (pq_idx > max_pq)
+ DP_ERR(p_hwfn,
+ "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq);
+
+ /* init pq params */
+ qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id;
+ qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport +
+ qm_info->num_vports;
+ qm_info->qm_pq_params[pq_idx].tc_id = tc;
+ qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP;
+ qm_info->qm_pq_params[pq_idx].rl_valid =
+ (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL);
+
+ /* qm params accounting */
+ qm_info->num_pqs++;
+ if (!(pq_init_flags & PQ_INIT_SHARE_VPORT))
+ qm_info->num_vports++;
+
+ if (pq_init_flags & PQ_INIT_PF_RL)
+ qm_info->num_pf_rls++;
+
+ if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn))
+ DP_ERR(p_hwfn,
+ "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n",
+ qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn));
+
+ if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn))
+ DP_ERR(p_hwfn,
+ "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n",
+ qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn));
+}
+
+/* get pq index according to PQ_FLAGS */
+static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
+ unsigned long pq_flags)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ /* Can't have multiple flags set here */
+ if (bitmap_weight(&pq_flags,
+ sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
+ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
+ goto err;
+ }
+
+ if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
+ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
+ goto err;
+ }
+
+ switch (pq_flags) {
+ case PQ_FLAGS_RLS:
+ return &qm_info->first_rl_pq;
+ case PQ_FLAGS_MCOS:
+ return &qm_info->first_mcos_pq;
+ case PQ_FLAGS_LB:
+ return &qm_info->pure_lb_pq;
+ case PQ_FLAGS_OOO:
+ return &qm_info->ooo_pq;
+ case PQ_FLAGS_ACK:
+ return &qm_info->pure_ack_pq;
+ case PQ_FLAGS_OFLD:
+ return &qm_info->first_ofld_pq;
+ case PQ_FLAGS_LLT:
+ return &qm_info->first_llt_pq;
+ case PQ_FLAGS_VFS:
+ return &qm_info->first_vf_pq;
+ default:
+ goto err;
+ }
+
+err:
+ return &qm_info->start_pq;
+}
+
+/* save pq index in qm info */
+static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn,
+ u32 pq_flags, u16 pq_val)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val;
+}
+
+/* get tx pq index, with the PQ TX base already set (ready for context init) */
+u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags)
+{
+ u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags);
+
+ return *base_pq_idx + CM_TX_PQ_BASE;
+}
+
+u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn);
+
+ if (max_tc == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
+ PQ_FLAGS_MCOS);
+ return p_hwfn->qm_info.start_pq;
+ }
+
+ if (tc > max_tc)
+ DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc);
+}
+
+u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf)
+{
+ u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (max_vf == 0) {
+ DP_ERR(p_hwfn, "pq with flag 0x%lx does not exist\n",
+ PQ_FLAGS_VFS);
+ return p_hwfn->qm_info.start_pq;
+ }
+
+ if (vf > max_vf)
+ DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf);
+
+ return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf);
+}
+
+u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u16 first_ofld_pq, pq_offset;
+
+ first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
+
+ return first_ofld_pq + pq_offset;
+}
+
+u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc)
+{
+ u16 first_llt_pq, pq_offset;
+
+ first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT);
+ pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ?
+ tc : PQ_INIT_DEFAULT_TC;
+
+ return first_llt_pq + pq_offset;
+}
+
+/* Functions for creating specific types of pqs */
+static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs);
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn)
+{
+ u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc;
+
+ /* override pq's TC if offload TC is set */
+ for (tc = 0; tc < num_tcs; tc++)
+ qed_init_qm_pq(p_hwfn, qm_info,
+ qed_is_offload_tc_set(p_hwfn) ?
+ p_hwfn->hw_info.offload_tc : tc,
+ PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs);
+ qed_init_qm_mtc_pqs(p_hwfn);
+}
+
+static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs);
+ qed_init_qm_mtc_pqs(p_hwfn);
+}
+
+static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u8 tc_idx;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs);
+ for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT);
+}
+
+static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs);
+ qm_info->num_vf_pqs = num_vfs;
+ for (vf_idx = 0; vf_idx < num_vfs; vf_idx++)
+ qed_init_qm_pq(p_hwfn,
+ qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL);
+}
+
+static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn)
+{
+ u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn);
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+
+ if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS))
+ return;
+
+ qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs);
+ for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++)
+ qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn),
+ PQ_INIT_PF_RL);
+}
+
+static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn)
+{
+ /* rate limited pqs, must come first (FW assumption) */
+ qed_init_qm_rl_pqs(p_hwfn);
+
+ /* pqs for multi cos */
+ qed_init_qm_mcos_pqs(p_hwfn);
+
+ /* pure loopback pq */
+ qed_init_qm_lb_pq(p_hwfn);
+
+ /* out of order pq */
+ qed_init_qm_ooo_pq(p_hwfn);
+
+ /* pure ack pq */
+ qed_init_qm_pure_ack_pq(p_hwfn);
+
+ /* pq for offloaded protocol */
+ qed_init_qm_offload_pq(p_hwfn);
+
+ /* low latency pq */
+ qed_init_qm_low_latency_pq(p_hwfn);
+
+ /* done sharing vports */
+ qed_init_qm_advance_vport(p_hwfn);
+
+ /* pqs for vfs */
+ qed_init_qm_vf_pqs(p_hwfn);
+}
+
+/* compare values of getters against resources amounts */
+static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn)
+{
+ if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) {
+ DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n");
+ return -EINVAL;
+ }
+
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn)) {
+ p_hwfn->hw_info.multi_tc_roce_en = false;
+ DP_NOTICE(p_hwfn,
+ "multi-tc roce was disabled to reduce requested amount of pqs\n");
+ if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ))
+ return 0;
+ }
+
+ DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n");
+ return -EINVAL;
+}
+
+static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct init_qm_vport_params *vport;
+ struct init_qm_port_params *port;
+ struct init_qm_pq_params *pq;
+ int i, tc;
+
+ /* top level params */
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n",
+ qm_info->start_pq,
+ qm_info->start_vport,
+ qm_info->pure_lb_pq,
+ qm_info->first_ofld_pq,
+ qm_info->first_llt_pq,
+ qm_info->pure_ack_pq);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n",
+ qm_info->ooo_pq,
+ qm_info->first_vf_pq,
+ qm_info->num_pqs,
+ qm_info->num_vf_pqs,
+ qm_info->num_vports, qm_info->max_phys_tcs_per_port);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n",
+ qm_info->pf_rl_en,
+ qm_info->pf_wfq_en,
+ qm_info->vport_rl_en,
+ qm_info->vport_wfq_en,
+ qm_info->pf_wfq,
+ qm_info->pf_rl,
+ qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
+
+ /* port table */
+ for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
+ port = &(qm_info->qm_port_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n",
+ i,
+ port->active,
+ port->active_phys_tcs,
+ port->num_pbf_cmd_lines,
+ port->num_btb_blocks, port->reserved);
+ }
+
+ /* vport table */
+ for (i = 0; i < qm_info->num_vports; i++) {
+ vport = &(qm_info->qm_vport_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "vport idx %d, wfq %d, first_tx_pq_id [ ",
+ qm_info->start_vport + i, vport->wfq);
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "%d ", vport->first_tx_pq_id[tc]);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n");
+ }
+
+ /* pq table */
+ for (i = 0; i < qm_info->num_pqs; i++) {
+ pq = &(qm_info->qm_pq_params[i]);
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n",
+ qm_info->start_pq + i,
+ pq->port_id,
+ pq->vport_id,
+ pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id);
+ }
+}
+
+static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
+{
+ /* reset params required for init run */
+ qed_init_qm_reset_params(p_hwfn);
+
+ /* init QM top level params */
+ qed_init_qm_params(p_hwfn);
+
+ /* init QM port params */
+ qed_init_qm_port_params(p_hwfn);
+
+ /* init QM vport params */
+ qed_init_qm_vport_params(p_hwfn);
+
+ /* init QM physical queue params */
+ qed_init_qm_pq_params(p_hwfn);
+
+ /* display all that init */
+ qed_dp_init_qm_params(p_hwfn);
+}
+
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate init tool in QM_PF stage
+ * 5. send an sdm_qm_cmd through rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ int rc;
+
+ /* initialize qed's qm data structure */
+ qed_init_qm_info(p_hwfn);
+
+ /* stop PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ /* prepare QM portion of runtime array */
+ qed_qm_init_pf(p_hwfn, p_ptt, false);
+
+ /* activate init tool on runtime array */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ return rc;
+
+ /* start PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ int rc;
+
+ rc = qed_init_qm_sanity(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn),
+ sizeof(*qm_info->qm_pq_params),
+ GFP_KERNEL);
+ if (!qm_info->qm_pq_params)
+ goto alloc_err;
+
+ qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
+ sizeof(*qm_info->qm_vport_params),
+ GFP_KERNEL);
+ if (!qm_info->qm_vport_params)
+ goto alloc_err;
+
+ qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine,
+ sizeof(*qm_info->qm_port_params),
+ GFP_KERNEL);
+ if (!qm_info->qm_port_params)
+ goto alloc_err;
+
+ qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn),
+ sizeof(*qm_info->wfq_data),
+ GFP_KERNEL);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
+ return 0;
+
+alloc_err:
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
+ qed_qm_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
+int qed_resc_alloc(struct qed_dev *cdev)
+{
+ u32 rdma_tasks, excess_tasks;
+ u32 line_count;
+ int i, rc = 0;
+
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i) {
+ rc = qed_l2_alloc(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+ return rc;
+ }
+
+ cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
+ if (!cdev->fw_data)
+ return -ENOMEM;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u32 n_eqes, num_cons;
+
+ /* Initialize the doorbell recovery mechanism */
+ rc = qed_db_recovery_setup(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* First allocate the context manager structure */
+ rc = qed_cxt_mngr_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* Set the HW cid/tid numbers (in the context manager)
+ * Must be done prior to any further computations.
+ */
+ rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_alloc_qm_data(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* init qm info */
+ qed_init_qm_info(p_hwfn);
+
+ /* Compute the ILT client partition */
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "too many ILT lines; re-computing with fewer lines\n");
+ /* In case there are not enough ILT lines we reduce the
+ * number of RDMA tasks and re-compute.
+ */
+ excess_tasks =
+ qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count);
+ if (!excess_tasks)
+ goto alloc_err;
+
+ rdma_tasks = RDMA_MAX_TIDS - excess_tasks;
+ rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "failed ILT compute. Requested too many lines: %u\n",
+ line_count);
+
+ goto alloc_err;
+ }
+ }
+
+ /* CID map / ILT shadow table / T2
+ * The table sizes are determined by the computations above
+ */
+ rc = qed_cxt_tables_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SPQ, must follow ILT because it initializes the SPQ context */
+ rc = qed_spq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* SP status block allocation */
+ p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn,
+ RESERVED_PTT_DPC);
+
+ rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* EQ */
+ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn);
+ enum protocol_type rdma_proto;
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ rdma_proto = PROTOCOLID_ROCE;
+ else
+ rdma_proto = PROTOCOLID_IWARP;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
+ rdma_proto,
+ NULL) * 2;
+ /* EQ should be able to get events from all SRQ's
+ * at the same time
+ */
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq;
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ num_cons =
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_TCP_ULP,
+ NULL);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+ DP_ERR(p_hwfn,
+ "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
+ goto alloc_no_mem;
+ }
+
+ rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_consq_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_l2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2) {
+ rc = qed_ll2_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+#endif
+
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
+ rc = qed_fcoe_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ rc = qed_iscsi_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ rc = qed_nvmetcp_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ rc = qed_ooo_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
+ if (QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ rc = qed_rdma_info_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+ }
+
+ /* DMA info initialization */
+ rc = qed_dmae_info_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ /* DCBX initialization */
+ rc = qed_dcbx_info_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
+ rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info);
+ if (rc)
+ goto alloc_err;
+ }
+
+ rc = qed_llh_alloc(cdev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for the llh_info structure\n");
+ goto alloc_err;
+ }
+
+ cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
+ if (!cdev->reset_stats)
+ goto alloc_no_mem;
+
+ return 0;
+
+alloc_no_mem:
+ rc = -ENOMEM;
+alloc_err:
+ qed_resc_free(cdev);
+ return rc;
+}
+
+static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ if (fw_return_code != COMMON_ERR_CODE_ERROR)
+ goto eqe_unexpected;
+
+ if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
+ le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
+ qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
+ return 0;
+ }
+
+eqe_unexpected:
+ DP_ERR(p_hwfn,
+ "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
+ opcode, fw_return_code, echo);
+ return -EINVAL;
+}
+
+static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ case COMMON_EVENT_VF_FLR:
+ return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
+ fw_return_code);
+ case COMMON_EVENT_FW_ERROR:
+ return qed_fw_err_handler(p_hwfn, opcode,
+ le16_to_cpu(echo), data,
+ fw_return_code);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
+ opcode, echo);
+ return -EINVAL;
+ }
+}
+
+void qed_resc_setup(struct qed_dev *cdev)
+{
+ int i;
+
+ if (IS_VF(cdev)) {
+ for_each_hwfn(cdev, i)
+ qed_l2_setup(&cdev->hwfns[i]);
+ return;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ qed_cxt_mngr_setup(p_hwfn);
+ qed_spq_setup(p_hwfn);
+ qed_eq_setup(p_hwfn);
+ qed_consq_setup(p_hwfn);
+
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur,
+ p_hwfn->mcp_info->mfw_mb_length);
+
+ qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ qed_l2_setup(p_hwfn);
+ qed_iov_setup(p_hwfn);
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_common_eqe_event);
+#ifdef CONFIG_QED_LL2
+ if (p_hwfn->using_ll2)
+ qed_ll2_setup(p_hwfn);
+#endif
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ qed_fcoe_setup(p_hwfn);
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ qed_iscsi_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) {
+ qed_nvmetcp_setup(p_hwfn);
+ qed_ooo_setup(p_hwfn);
+ }
+ }
+}
+
+#define FINAL_CLEANUP_POLL_CNT (100)
+#define FINAL_CLEANUP_POLL_TIME (10)
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 id, bool is_vf)
+{
+ u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
+ int rc = -EBUSY;
+
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
+ if (is_vf)
+ id += 0x10;
+
+ command |= X_FINAL_CLEANUP_AGG_INT <<
+ SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
+ command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
+ command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
+ command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
+
+ /* Make sure notification is not set before initiating final cleanup */
+ if (REG_RD(p_hwfn, addr)) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected; Found final cleanup notification before initiating final cleanup\n");
+ REG_WR(p_hwfn, addr, 0);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Sending final cleanup for PFVF[%d] [Command %08x]\n",
+ id, command);
+
+ qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command);
+
+ /* Poll until completion */
+ while (!REG_RD(p_hwfn, addr) && count--)
+ msleep(FINAL_CLEANUP_POLL_TIME);
+
+ if (REG_RD(p_hwfn, addr))
+ rc = 0;
+ else
+ DP_NOTICE(p_hwfn,
+ "Failed to receive FW final cleanup notification\n");
+
+ /* Cleanup afterwards */
+ REG_WR(p_hwfn, addr, 0);
+
+ return rc;
+}
+
+static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
+{
+ int hw_mode = 0;
+
+ if (QED_IS_BB_B0(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_BB;
+ } else if (QED_IS_AH(p_hwfn->cdev)) {
+ hw_mode |= 1 << MODE_K2;
+ } else {
+ DP_NOTICE(p_hwfn, "Unknown chip type %#x\n",
+ p_hwfn->cdev->type);
+ return -EINVAL;
+ }
+
+ switch (p_hwfn->cdev->num_ports_in_engine) {
+ case 1:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
+ break;
+ case 2:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
+ break;
+ case 4:
+ hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
+ p_hwfn->cdev->num_ports_in_engine);
+ return -EINVAL;
+ }
+
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
+ hw_mode |= 1 << MODE_MF_SD;
+ else
+ hw_mode |= 1 << MODE_MF_SI;
+
+ hw_mode |= 1 << MODE_ASIC;
+
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
+ p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
+
+ return 0;
+}
+
+/* Init run time data for all PFs on an engine. */
+static void qed_init_cau_rt_data(struct qed_dev *cdev)
+{
+ u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
+ int i, igu_sb_id;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) {
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if (!p_block->is_pf)
+ continue;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_block->function_id, 0, 0);
+ STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2,
+ sb_entry);
+ }
+ }
+}
+
+static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val, wr_mbs, cache_line_size;
+
+ val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0);
+ switch (val) {
+ case 0:
+ wr_mbs = 128;
+ break;
+ case 1:
+ wr_mbs = 256;
+ break;
+ case 2:
+ wr_mbs = 512;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ val);
+ return;
+ }
+
+ cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs);
+ switch (cache_line_size) {
+ case 32:
+ val = 0;
+ break;
+ case 64:
+ val = 1;
+ break;
+ case 128:
+ val = 2;
+ break;
+ case 256:
+ val = 3;
+ break;
+ default:
+ DP_INFO(p_hwfn,
+ "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n",
+ cache_line_size);
+ }
+
+ if (wr_mbs < L1_CACHE_BYTES)
+ DP_INFO(p_hwfn,
+ "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
+ L1_CACHE_BYTES, wr_mbs);
+
+ STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val);
+ if (val > 0) {
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val);
+ STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val);
+ }
+}
+
+static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int hw_mode)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ struct qed_qm_common_rt_init_params *params;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 vf_id, max_num_vfs;
+ u16 num_pfs, pf_id;
+ u32 concrete_fid;
+ int rc = 0;
+
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to allocate common init params\n");
+
+ return -ENOMEM;
+ }
+
+ qed_init_cau_rt_data(cdev);
+
+ /* Program GTT windows */
+ qed_gtt_init(p_hwfn);
+
+ if (p_hwfn->mcp_info) {
+ if (p_hwfn->mcp_info->func_info.bandwidth_max)
+ qm_info->pf_rl_en = true;
+ if (p_hwfn->mcp_info->func_info.bandwidth_min)
+ qm_info->pf_wfq_en = true;
+ }
+
+ params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params->pf_rl_en = qm_info->pf_rl_en;
+ params->pf_wfq_en = qm_info->pf_wfq_en;
+ params->global_rl_en = qm_info->vport_rl_en;
+ params->vport_wfq_en = qm_info->vport_wfq_en;
+ params->port_params = qm_info->qm_port_params;
+
+ qed_qm_common_rt_init(p_hwfn, params);
+
+ qed_cxt_hw_init_common(p_hwfn);
+
+ qed_init_cache_line_size(p_hwfn, p_ptt);
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
+ if (rc)
+ goto out;
+
+ qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
+
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
+ for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
+ concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
+ qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
+out:
+ kfree(params);
+
+ return rc;
+}
+
+static int
+qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus)
+{
+ u32 dpi_bit_shift, dpi_count, dpi_page_size;
+ u32 min_dpis;
+ u32 n_wids;
+
+ /* Calculate DPI size */
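+ /* e.g. with 64 present CPUs (and QED_MIN_WIDS below that), n_wids = 64,
+ * so the DPI page size is 64 * QED_WID_SIZE rounded up to a multiple of
+ * PAGE_SIZE, and dpi_count is the number of such pages that fit in the
+ * PWM region.
+ */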
+ n_wids = max_t(u32, QED_MIN_WIDS, n_cpus);
+ dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids);
+ dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+ dpi_bit_shift = ilog2(dpi_page_size / 4096);
+ dpi_count = pwm_region_size / dpi_page_size;
+
+ min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis;
+ min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis);
+
+ p_hwfn->dpi_size = dpi_page_size;
+ p_hwfn->dpi_count = dpi_count;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift);
+
+ if (dpi_count < min_dpis)
+ return -EINVAL;
+
+ return 0;
+}
+
+enum QED_ROCE_EDPM_MODE {
+ QED_ROCE_EDPM_MODE_ENABLE = 0,
+ QED_ROCE_EDPM_MODE_FORCE_ON = 1,
+ QED_ROCE_EDPM_MODE_DISABLE = 2,
+};
+
+bool qed_edpm_enabled(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm)
+ return false;
+
+ return true;
+}
+
+static int
+qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 pwm_regsize, norm_regsize;
+ u32 non_pwm_conn, min_addr_reg1;
+ u32 db_bar_size, n_cpus = 1;
+ u32 roce_edpm_mode;
+ u32 pf_dems_shift;
+ int rc = 0;
+ u8 cond;
+
+ db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1);
+ if (p_hwfn->cdev->num_hwfns > 1)
+ db_bar_size /= 2;
+
+ /* Calculate doorbell regions */
+ non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
+ NULL) +
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ NULL);
+ norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE);
+ min_addr_reg1 = norm_regsize / 4096;
+ pwm_regsize = db_bar_size - norm_regsize;
+
+ /* Check that the normal and PWM sizes are valid */
+ if (db_bar_size < norm_regsize) {
+ DP_ERR(p_hwfn->cdev,
+ "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n",
+ db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ if (pwm_regsize < QED_MIN_PWM_REGION) {
+ DP_ERR(p_hwfn->cdev,
+ "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n",
+ pwm_regsize,
+ QED_MIN_PWM_REGION, db_bar_size, norm_regsize);
+ return -EINVAL;
+ }
+
+ /* Calculate number of DPIs */
+ roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode;
+ if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) ||
+ (roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON)) {
+ /* Either EDPM is mandatory, or we are attempting to allocate a
+ * WID per CPU.
+ */
+ n_cpus = num_present_cpus();
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+ }
+
+ cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) ||
+ (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE);
+ if (cond || p_hwfn->dcbx_no_edpm) {
+ /* Either EDPM is disabled by user configuration, or it is
+ * disabled via DCBx, or it is not mandatory and we failed to
+ * allocate a WID per CPU.
+ */
+ n_cpus = 1;
+ rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus);
+
+ if (cond)
+ qed_rdma_dpm_bar(p_hwfn, p_ptt);
+ }
+
+ p_hwfn->wid_count = (u16)n_cpus;
+
+ DP_INFO(p_hwfn,
+ "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
+ norm_regsize,
+ pwm_regsize,
+ p_hwfn->dpi_size,
+ p_hwfn->dpi_count,
+ (!qed_edpm_enabled(p_hwfn)) ?
+ "disabled" : "enabled", PAGE_SIZE);
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n",
+ p_hwfn->dpi_count,
+ p_hwfn->pf_params.rdma_pf_params.min_dpis);
+ return -EINVAL;
+ }
+
+ p_hwfn->dpi_start_offset = norm_regsize;
+
+ /* DEMS size is configured as log2 of DWORDs, hence the division by 4 */
+ pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
+
+ return 0;
+}
+
+static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int hw_mode)
+{
+ int rc = 0;
+
+ /* In CMT the gate should be cleared by the 2nd hwfn */
+ if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn))
+ STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0);
+
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+ if (rc)
+ return rc;
+
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0);
+
+ return 0;
+}
+
+static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tunn,
+ int hw_mode,
+ bool b_hw_start,
+ enum qed_int_mode int_mode,
+ bool allow_npar_tx_switch)
+{
+ u8 rel_pf_id = p_hwfn->rel_pf_id;
+ int rc = 0;
+
+ if (p_hwfn->mcp_info) {
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+ if (p_info->bandwidth_min)
+ p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
+
+ /* Update the rate limit once we actually have a link */
+ p_hwfn->qm_info.pf_rl = 100000;
+ }
+
+ qed_cxt_hw_init_pf(p_hwfn, p_ptt);
+
+ qed_int_igu_init_rt(p_hwfn);
+
+ /* Set VLAN in NIG if needed */
+ if (hw_mode & BIT(MODE_MF_SD)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
+ p_hwfn->hw_info.ovlan);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring LLH_FUNC_FILTER_HDR_SEL\n");
+ STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET,
+ 1);
+ }
+
+ /* Enable classification by MAC if needed */
+ if (hw_mode & BIT(MODE_MF_SI)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Configuring TAGMAC_CLS_TYPE\n");
+ STORE_RT_REG(p_hwfn,
+ NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
+ }
+
+ /* Protocol Configuration */
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) ||
+ (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
+ (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 1 : 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
+
+ /* Sanity check before the PF init sequence that uses DMAE */
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+ if (rc)
+ return rc;
+
+ /* PF Init sequence */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
+ if (rc)
+ return rc;
+
+ qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem);
+
+ /* Pure runtime initializations - directly to the HW */
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+
+ rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ /* Use the leading hwfn since in CMT only NIG #0 is operational */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_llh_hw_init_pf(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ if (b_hw_start) {
+ /* enable interrupts */
+ qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
+
+ /* send function start command */
+ rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+ allow_npar_tx_switch);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
+ return rc;
+ }
+ if (p_hwfn->hw_info.personality == QED_PCI_FCOE) {
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2));
+ qed_wr(p_hwfn, p_ptt,
+ PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST,
+ 0x100);
+ }
+ }
+ return rc;
+}
+
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable)
+{
+ u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0;
+
+ /* Configure the PF's internal FID_enable for master transactions */
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
+
+ /* Wait until value is set - try for 1 second every 50us */
+ for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
+ val = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+ if (val == set_val)
+ break;
+
+ usleep_range(50, 60);
+ }
+
+ if (val != set_val) {
+ DP_NOTICE(p_hwfn,
+ "PFID_ENABLE_MASTER wasn't changed after a second\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_main_ptt)
+{
+ /* Read shadow of current MFW mailbox */
+ qed_mcp_read_mb(p_hwfn, p_main_ptt);
+ memcpy(p_hwfn->mcp_info->mfw_mb_shadow,
+ p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length);
+}
+
+static void
+qed_fill_load_req_params(struct qed_load_req_params *p_load_req,
+ struct qed_drv_load_params *p_drv_load)
+{
+ memset(p_load_req, 0, sizeof(*p_load_req));
+
+ p_load_req->drv_role = p_drv_load->is_crash_kernel ?
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS;
+ p_load_req->timeout_val = p_drv_load->mfw_timeout_val;
+ p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset;
+ p_load_req->override_force_load = p_drv_load->override_force_load;
+}
+
+static int qed_vf_start(struct qed_hwfn *p_hwfn,
+ struct qed_hw_init_params *p_params)
+{
+ if (p_params->p_tunn) {
+ qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+ qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+ }
+
+ p_hwfn->b_int_enabled = true;
+
+ return 0;
+}
+
+static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR,
+ BIT(p_hwfn->abs_pf_id));
+}
+
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
+{
+ struct qed_load_req_params load_req_params;
+ u32 load_code, resp, param, drv_mb_param;
+ bool b_default_mtu = true;
+ struct qed_hwfn *p_hwfn;
+ const u32 *fw_overlays;
+ u32 fw_overlays_len;
+ u16 ether_type;
+ int rc = 0, i;
+
+ if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ if (IS_PF(cdev)) {
+ rc = qed_init_fw_data(cdev, p_params->bin_fw_data);
+ if (rc)
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+
+ /* If management didn't provide a default, set one of our own */
+ if (!p_hwfn->hw_info.mtu) {
+ p_hwfn->hw_info.mtu = 1500;
+ b_default_mtu = false;
+ }
+
+ if (IS_VF(cdev)) {
+ qed_vf_start(p_hwfn, p_params);
+ continue;
+ }
+
+ /* Some flows may have left the variable set */
+ p_hwfn->mcp_info->mcp_handling_status = 0;
+
+ rc = qed_calc_hw_mode(p_hwfn);
+ if (rc)
+ return rc;
+
+ if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING,
+ &cdev->mf_bits) ||
+ test_bit(QED_MF_8021AD_TAGGING,
+ &cdev->mf_bits))) {
+ if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits))
+ ether_type = ETH_P_8021Q;
+ else
+ ether_type = ETH_P_8021AD;
+ STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET,
+ ether_type);
+ STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET,
+ ether_type);
+ }
+
+ qed_fill_load_req_params(&load_req_params,
+ p_params->p_drv_load_params);
+ rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+ &load_req_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n");
+ return rc;
+ }
+
+ load_code = load_req_params.load_code;
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load request was sent. Load code: 0x%x\n",
+ load_code);
+
+ /* Only relevant for recovery:
+ * Clear the indication after the MFW has responded to the LOAD_REQ.
+ */
+ cdev->recov_in_prog = false;
+
+ qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt);
+
+ qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* Clean up the chip from a previous driver if any remains exist.
+ * This is not needed when the PF is the first one on the
+ * engine, since afterwards we are going to init the FW.
+ */
+ if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) {
+ rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->rel_pf_id, false);
+ if (rc) {
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt,
+ QED_HW_ERR_RAMROD_FAIL,
+ "Final cleanup failed\n");
+ goto load_err;
+ }
+ }
+
+ /* Log and clear previous pglue_b errors if such exist */
+ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
+
+ /* Enable the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
+ true);
+ if (rc)
+ goto load_err;
+
+ /* Clear the pglue_b was_error indication.
+ * In E4 it must be done after the BME and the internal
+ * FID_enable for the PF are set, since VDMs may cause the
+ * indication to be set again.
+ */
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ fw_overlays = cdev->fw_data->fw_overlays;
+ fw_overlays_len = cdev->fw_data->fw_overlays_len;
+ p_hwfn->fw_overlay_mem =
+ qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays,
+ fw_overlays_len);
+ if (!p_hwfn->fw_overlay_mem) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate fw overlay memory\n");
+ rc = -ENOMEM;
+ goto load_err;
+ }
+
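+		/* The load code determines which init phases this PF runs:
+		 * an engine-load runs the common, port and PF phases, a
+		 * port-load runs the port and PF phases, and a function-load
+		 * runs only the PF phase.
+		 */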
+ switch (load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+ fallthrough;
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ break;
+
+ fallthrough;
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
+ p_params->p_tunn,
+ p_hwfn->hw_info.hw_mode,
+ p_params->b_hw_start,
+ p_params->int_mode,
+ p_params->allow_npar_tx_switch);
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected load code [0x%08x]", load_code);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "init phase failed for loadcode 0x%x (rc %d)\n",
+ load_code, rc);
+ goto load_err;
+ }
+
+ rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ return rc;
+
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &resp, &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send DCBX attention request\n");
+ return rc;
+ }
+
+ p_hwfn->hw_init_done = true;
+ }
+
+ if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+
+ /* Get pre-negotiated values for stag, bandwidth etc. */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
+ drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_GET_OEM_UPDATES,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send GET_OEM_UPDATES attention request\n");
+
+ drv_mb_param = STORM_FW_VERSION;
+ rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
+ drv_mb_param, &load_code, &param);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to update firmware version\n");
+
+ if (!b_default_mtu) {
+ rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt,
+ p_hwfn->hw_info.mtu);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to update default mtu\n");
+ }
+
+ rc = qed_mcp_ov_update_driver_state(p_hwfn,
+ p_hwfn->p_main_ptt,
+ QED_OV_DRIVER_STATE_DISABLED);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to update driver state\n");
+
+ rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
+ QED_OV_ESWITCH_NONE);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
+ }
+
+ return 0;
+
+load_err:
+ /* The MFW load lock should also be released when initialization fails.
+ */
+ qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt);
+ return rc;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static void qed_hw_timers_stop(struct qed_dev *cdev,
+ struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ int i;
+
+ /* close timers */
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
+
+ if (cdev->recov_in_prog)
+ return;
+
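+	/* Poll until the timers block completes its linear scans of
+	 * connections and tasks
+	 */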
+ for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) {
+ if ((!qed_rd(p_hwfn, p_ptt,
+ TM_REG_PF_SCAN_ACTIVE_CONN)) &&
+ (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
+ break;
+
+ /* Depending on the number of connections/tasks, a 1ms
+ * sleep may be required between polls
+ */
+ usleep_range(1000, 2000);
+ }
+
+ if (i < QED_HW_STOP_RETRY_LIMIT)
+ return;
+
+ DP_NOTICE(p_hwfn,
+ "Timers linear scans are not over [Connection %02x Tasks %02x]\n",
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN),
+ (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK));
+}
+
+void qed_hw_timers_stop_all(struct qed_dev *cdev)
+{
+ int j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+ }
+}
+
+int qed_hw_stop(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc, rc2 = 0;
+ int j;
+
+ for_each_hwfn(cdev, j) {
+ p_hwfn = &cdev->hwfns[j];
+ p_ptt = p_hwfn->p_main_ptt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_vf_pf_reset failed. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+ continue;
+ }
+
+ /* mark the hw as uninitialized... */
+ p_hwfn->hw_init_done = false;
+
+ /* Send unload command to MCP */
+ if (!cdev->recov_in_prog) {
+ rc = qed_mcp_unload_req(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_REQ command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+ }
+
+ qed_slowpath_irq_sync(p_hwfn);
+
+ /* After this point no MFW attentions are expected, e.g. to prevent
+ * a race between PF stop and DCBx PF update.
+ */
+ rc = qed_sp_pf_stop(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to close PF against FW [rc = %d]. Continue to stop HW to prevent illegal host access by the device.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+
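+		/* Gate incoming traffic in the NIG and close all parser
+		 * searches
+		 */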
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_hw_timers_stop(cdev, p_hwfn, p_ptt);
+
+ /* Disable Attention Generation */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+
+ /* Disable PF in HW blocks */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0);
+ qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0);
+
+ if (IS_LEAD_HWFN(p_hwfn) &&
+ test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) &&
+ !QED_IS_FCOE_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0,
+ p_hwfn->hw_info.hw_mac_addr);
+
+ if (!cdev->recov_in_prog) {
+ rc = qed_mcp_unload_done(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed sending a UNLOAD_DONE command. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+ }
+ }
+
+ if (IS_PF(cdev) && !cdev->recov_in_prog) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt;
+
+ /* Clear the PF's internal FID_enable in the PXP.
+ * In CMT this should only be done for first hw-function, and
+ * only after all transactions have stopped for all active
+ * hw-functions.
+ */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+ rc2 = -EINVAL;
+ }
+ }
+
+ return rc2;
+}
+
+int qed_hw_stop_fastpath(struct qed_dev *cdev)
+{
+ int j;
+
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
+ struct qed_ptt *p_ptt;
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFDOWN, "Shutting down the fastpath\n");
+
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
+
+ qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
+
+ /* Need to wait 1ms to guarantee SBs are cleared */
+ usleep_range(1000, 2000);
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return 0;
+}
+
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (p_hwfn->p_rdma_info &&
+ p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs)
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
+
+ /* Re-open incoming traffic */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+/* Free hwfn memory and resources acquired in hw_hwfn_prepare */
+static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn)
+{
+ qed_ptt_pool_free(p_hwfn);
+ kfree(p_hwfn->hw_info.p_igu_info);
+ p_hwfn->hw_info.p_igu_info = NULL;
+}
+
+/* Setup bar access */
+static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
+{
+ /* clear indirect access */
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0);
+ } else {
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0);
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0);
+ }
+
+ /* Clean previous pglue_b errors if such exist */
+ qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt);
+
+ /* enable internal target-read */
+ qed_wr(p_hwfn, p_hwfn->p_main_ptt,
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+}
+
+static void get_function_id(struct qed_hwfn *p_hwfn)
+{
+ /* ME Register */
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
+
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
+
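+	/* The absolute PF ID resides in bits 19:16 of the concrete FID */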
+ p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
+ p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID);
+ p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PORT);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE,
+ "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
+ p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
+}
+
+static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
+{
+ u32 *feat_num = p_hwfn->hw_info.feat_num;
+ struct qed_sb_cnt_info sb_cnt;
+ u32 non_l2_sbs = 0;
+
+ memset(&sb_cnt, 0, sizeof(sb_cnt));
+ qed_int_get_num_sbs(p_hwfn, &sb_cnt);
+
+ if (IS_ENABLED(CONFIG_QED_RDMA) &&
+ QED_IS_RDMA_PERSONALITY(p_hwfn)) {
+ /* Each RoCE CNQ requires 1 status block + 1 CNQ. We divide
+ * the status blocks equally between L2 and RoCE, taking into
+ * account how many L2 queues / CNQs we have.
+ */
+ feat_num[QED_RDMA_CNQ] =
+ min_t(u32, sb_cnt.cnt / 2,
+ RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM));
+
+ non_l2_sbs = feat_num[QED_RDMA_CNQ];
+ }
+ if (QED_IS_L2_PERSONALITY(p_hwfn)) {
+ /* Start by allocating VF queues, then PF's */
+ feat_num[QED_VF_L2_QUE] = min_t(u32,
+ RESC_NUM(p_hwfn, QED_L2_QUEUE),
+ sb_cnt.iov_cnt);
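+		/* PF L2 queues receive the status blocks left after the CNQs,
+		 * bounded by the L2 queue resources not taken by VF queues
+		 */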
+ feat_num[QED_PF_L2_QUE] = min_t(u32,
+ sb_cnt.cnt - non_l2_sbs,
+ RESC_NUM(p_hwfn,
+ QED_L2_QUEUE) -
+ FEAT_NUM(p_hwfn,
+ QED_VF_L2_QUE));
+ }
+
+ if (QED_IS_FCOE_PERSONALITY(p_hwfn))
+ feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn))
+ feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
+ if (QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt,
+ RESC_NUM(p_hwfn,
+ QED_CMDQS_CQS));
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n",
+ (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE),
+ (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ),
+ (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ),
+ (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ),
+ (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ),
+ (int)sb_cnt.cnt);
+}
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id)
+{
+ switch (res_id) {
+ case QED_L2_QUEUE:
+ return "L2_QUEUE";
+ case QED_VPORT:
+ return "VPORT";
+ case QED_RSS_ENG:
+ return "RSS_ENG";
+ case QED_PQ:
+ return "PQ";
+ case QED_RL:
+ return "RL";
+ case QED_MAC:
+ return "MAC";
+ case QED_VLAN:
+ return "VLAN";
+ case QED_RDMA_CNQ_RAM:
+ return "RDMA_CNQ_RAM";
+ case QED_ILT:
+ return "ILT";
+ case QED_LL2_RAM_QUEUE:
+ return "LL2_RAM_QUEUE";
+ case QED_LL2_CTX_QUEUE:
+ return "LL2_CTX_QUEUE";
+ case QED_CMDQS_CQS:
+ return "CMDQS_CQS";
+ case QED_RDMA_STATS_QUEUE:
+ return "RDMA_STATS_QUEUE";
+ case QED_BDQ:
+ return "BDQ";
+ case QED_SB:
+ return "SB";
+ default:
+ return "UNKNOWN_RESOURCE";
+ }
+}
+
+static int
+__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ int rc;
+
+ rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id,
+ resc_max_val, p_mcp_resp);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for a max value setting of resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK)
+ DP_INFO(p_hwfn,
+ "Failed to set the max value of resource %d [%s]. mcp_resp = 0x%08x.\n",
+ res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp);
+
+ return 0;
+}
+
+static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = {
+ {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2},
+ {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2},
+ {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2},
+ {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2},
+ {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2},
+ {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2},
+ {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2},
+ {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2},
+ {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2},
+ {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2},
+ {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS},
+ {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES},
+ {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2},
+};
+
+u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
+{
+ enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+
+ if (type >= QED_NUM_HSI_DEFS) {
+ DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type);
+ return 0;
+ }
+
+ return qed_hsi_def_val[type][chip_id];
+}
+
+static int
+qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resc_max_val, mcp_resp;
+ u8 res_id;
+ int rc;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ switch (res_id) {
+ case QED_LL2_RAM_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ /* No need for a case for QED_CMDQS_CQS since
+ * CNQ/CMDQS are the same resource.
+ */
+ resc_max_val = NUM_OF_GLOBAL_QUEUES;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ resc_max_val =
+ NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev);
+ break;
+ case QED_BDQ:
+ resc_max_val = BDQ_NUM_RESOURCES;
+ break;
+ default:
+ continue;
+ }
+
+ rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id,
+ resc_max_val, &mcp_resp);
+ if (rc)
+ return rc;
+
+ /* There's no point in continuing to the next resource if the
+ * command is not supported by the MFW.
+ * We do continue if the command is supported but the resource
+ * is unknown to the MFW. Such a resource will be later
+ * configured with the default allocation values.
+ */
+ if (mcp_resp == FW_MSG_CODE_UNSUPPORTED)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id,
+ u32 *p_resc_num, u32 *p_resc_start)
+{
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ switch (res_id) {
+ case QED_L2_QUEUE:
+ *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs;
+ break;
+ case QED_VPORT:
+ *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs;
+ break;
+ case QED_RSS_ENG:
+ *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs;
+ break;
+ case QED_PQ:
+ *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs;
+ *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */
+ break;
+ case QED_RL:
+ *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs;
+ break;
+ case QED_ILT:
+ *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs;
+ break;
+ case QED_LL2_RAM_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs;
+ break;
+ case QED_BDQ:
+ if (p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP)
+ *p_resc_num = 0;
+ else
+ *p_resc_num = 1;
+ break;
+ case QED_SB:
+ /* Since we want its value to reflect whether the MFW supports
+ * the new scheme, default to 0.
+ */
+ *p_resc_num = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (res_id) {
+ case QED_BDQ:
+ if (!*p_resc_num)
+ *p_resc_start = 0;
+ else if (p_hwfn->cdev->num_ports_in_engine == 4)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
+ *p_resc_start = p_hwfn->port_id;
+ else if (p_hwfn->hw_info.personality == QED_PCI_FCOE)
+ *p_resc_start = p_hwfn->port_id + 2;
+ break;
+ default:
+ *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx;
+ break;
+ }
+
+ return 0;
+}
+
+static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn,
+ enum qed_resources res_id)
+{
+ u32 dflt_resc_num = 0, dflt_resc_start = 0;
+ u32 mcp_resp, *p_resc_num, *p_resc_start;
+ int rc;
+
+ p_resc_num = &RESC_NUM(p_hwfn, res_id);
+ p_resc_start = &RESC_START(p_hwfn, res_id);
+
+ rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num,
+ &dflt_resc_start);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed to get default amount for resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id,
+ &mcp_resp, p_resc_num, p_resc_start);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "MFW response failure for an allocation request for resource %d [%s]\n",
+ res_id, qed_hw_get_resc_name(res_id));
+ return rc;
+ }
+
+ /* Default driver values are applied in the following cases:
+ * - The resource allocation MB command is not supported by the MFW
+ * - There is an internal error in the MFW while processing the request
+ * - The resource ID is unknown to the MFW
+ */
+ if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to receive allocation info for resource %d [%s]. mcp_resp = 0x%x. Applying default values [%d,%d].\n",
+ res_id,
+ qed_hw_get_resc_name(res_id),
+ mcp_resp, dflt_resc_num, dflt_resc_start);
+ *p_resc_num = dflt_resc_num;
+ *p_resc_start = dflt_resc_start;
+ goto out;
+ }
+
+out:
+ /* PQs have to be aligned to 8 [that's the HW granularity].
+ * Reduce the number and start so they fit.
+ */
+ if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) {
+ DP_INFO(p_hwfn,
+ "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n",
+ *p_resc_num,
+ (*p_resc_num) & ~0x7,
+ *p_resc_start, (*p_resc_start) & ~0x7);
+ *p_resc_num &= ~0x7;
+ *p_resc_start &= ~0x7;
+ }
+
+ return 0;
+}
+
+static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+ u8 res_id;
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
+ rc = __qed_hw_set_resc_info(p_hwfn, res_id);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 native_ppfid_idx;
+ int rc;
+
+ /* The calculation of native_ppfid_idx differs between BB and AH */
+ if (QED_IS_BB(cdev))
+ native_ppfid_idx = p_hwfn->rel_pf_id;
+ else
+ native_ppfid_idx = p_hwfn->rel_pf_id /
+ cdev->num_ports_in_engine;
+
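+	/* Query the PPFID bitmap from the MFW; if the command is not
+	 * supported, fall back to a bitmap holding only the native PPFID
+	 */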
+ rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc != 0 && rc != -EOPNOTSUPP)
+ return rc;
+ else if (rc == -EOPNOTSUPP)
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+
+ if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) {
+ DP_INFO(p_hwfn,
+ "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n",
+ native_ppfid_idx, cdev->ppfid_bitmap);
+ cdev->ppfid_bitmap = 0x1 << native_ppfid_idx;
+ }
+
+ return 0;
+}
+
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params resc_unlock_params;
+ struct qed_resc_lock_params resc_lock_params;
+ bool b_ah = QED_IS_AH(p_hwfn->cdev);
+ u8 res_id;
+ int rc;
+
+ /* Setting the max values of the soft resources and the following
+ * resources allocation queries should be atomic. Since several PFs can
+ * run in parallel - a resource lock is needed.
+ * If either the resource lock or resource set value commands are not
+ * supported - skip the max values setting, release the lock if
+ * needed, and proceed to the queries. Other failures, including a
+ * failure to acquire the lock, will cause this function to fail.
+ */
+ qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params,
+ QED_RESC_LOCK_RESC_ALLOC, false);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
+ } else if (!resc_lock_params.b_granted) {
+ DP_NOTICE(p_hwfn,
+ "Failed to acquire the resource lock for the resource allocation commands\n");
+ return -EBUSY;
+ } else {
+ rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt);
+ if (rc && rc != -EINVAL) {
+ DP_NOTICE(p_hwfn,
+ "Failed to set the max values of the soft resources\n");
+ goto unlock_and_exit;
+ } else if (rc == -EINVAL) {
+ DP_INFO(p_hwfn,
+ "Skip the max values setting of the soft resources since it is not supported by the MFW\n");
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt,
+ &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+ }
+
+ rc = qed_hw_set_resc_info(p_hwfn);
+ if (rc)
+ goto unlock_and_exit;
+
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released) {
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to release the resource lock for the resource allocation commands\n");
+ }
+
+ /* PPFID bitmap */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ /* Sanity for ILT */
+ if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
+ (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
+ DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, QED_ILT),
+ RESC_END(p_hwfn, QED_ILT) - 1);
+ return -EINVAL;
+ }
+
+ /* This will also learn the number of SBs from MFW */
+ if (qed_int_igu_reset_cam(p_hwfn, p_ptt))
+ return -EINVAL;
+
+ qed_hw_set_feat(p_hwfn);
+
+ for (res_id = 0; res_id < QED_MAX_RESC; res_id++)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n",
+ qed_hw_get_resc_name(res_id),
+ RESC_NUM(p_hwfn, res_id),
+ RESC_START(p_hwfn, res_id));
+
+ return 0;
+
+unlock_and_exit:
+ if (resc_lock_params.b_granted && !resc_unlock_params.b_released)
+ qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params);
+ return rc;
+}
+
+static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld;
+ u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
+ struct qed_mcp_link_speed_params *ext_speed;
+ struct qed_mcp_link_capabilities *p_caps;
+ struct qed_mcp_link_params *link;
+ int i;
+
+ /* Read global nvm_cfg address */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+
+ /* Verify MCP has initialized it */
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read nvm_cfg1 base (note this is just the offset, not the offsize) */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, core_cfg);
+
+ core_cfg = qed_rd(p_hwfn, p_ptt, addr);
+
+ switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
+ NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg);
+ break;
+ }
+
+ /* Read default link configuration */
+ link = &p_hwfn->mcp_info->link_input;
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, speed_cap_mask));
+ link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
+ link->speed.advertised_speeds = link_temp;
+
+ p_caps->speed_capabilities = link->speed.advertised_speeds;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, link_settings));
+ switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
+ NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
+ link->speed.autoneg = true;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
+ link->speed.forced_speed = 1000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
+ link->speed.forced_speed = 10000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_20G:
+ link->speed.forced_speed = 20000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
+ link->speed.forced_speed = 25000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
+ link->speed.forced_speed = 40000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
+ link->speed.forced_speed = 50000;
+ break;
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
+ link->speed.forced_speed = 100000;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp);
+ }
+
+ p_caps->default_speed_autoneg = link->speed.autoneg;
+
+ fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL);
+ link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
+ link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
+ link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
+ link->loopback_mode = 0;
+
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+ switch (GET_MFW_FIELD(link_temp,
+ NVM_CFG1_PORT_FEC_FORCE_MODE)) {
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE:
+ p_caps->fec_default |= QED_FEC_MODE_NONE;
+ break;
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE:
+ p_caps->fec_default |= QED_FEC_MODE_FIRECODE;
+ break;
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_RS:
+ p_caps->fec_default |= QED_FEC_MODE_RS;
+ break;
+ case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO:
+ p_caps->fec_default |= QED_FEC_MODE_AUTO;
+ break;
+ default:
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "unknown FEC mode in 0x%08x\n", link_temp);
+ }
+ } else {
+ p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED;
+ }
+
+ link->fec = p_caps->fec_default;
+
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
+ link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr +
+ offsetof(struct nvm_cfg1_port, ext_phy));
+ link_temp &= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK;
+ link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET;
+ p_caps->default_eee = QED_MCP_EEE_ENABLED;
+ link->eee.enable = true;
+ switch (link_temp) {
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED:
+ p_caps->default_eee = QED_MCP_EEE_DISABLED;
+ link->eee.enable = false;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE:
+ p_caps->eee_lpi_timer =
+ EEE_TX_TIMER_USEC_AGGRESSIVE_TIME;
+ break;
+ case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY:
+ p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME;
+ break;
+ }
+
+ link->eee.tx_lpi_timer = p_caps->eee_lpi_timer;
+ link->eee.tx_lpi_enable = link->eee.enable;
+ link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV;
+ } else {
+ p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED;
+ }
+
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+ ext_speed = &link->ext_speed;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ extended_speed));
+
+ fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED);
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN)
+ ext_speed->autoneg = true;
+
+ ext_speed->forced_speed = 0;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G)
+ ext_speed->forced_speed |= QED_EXT_SPEED_1G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G)
+ ext_speed->forced_speed |= QED_EXT_SPEED_10G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G)
+ ext_speed->forced_speed |= QED_EXT_SPEED_20G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G)
+ ext_speed->forced_speed |= QED_EXT_SPEED_25G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G)
+ ext_speed->forced_speed |= QED_EXT_SPEED_40G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R)
+ ext_speed->forced_speed |= QED_EXT_SPEED_50G_R;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2)
+ ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2)
+ ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4)
+ ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4)
+ ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4;
+
+ fld = GET_MFW_FIELD(link_temp,
+ NVM_CFG1_PORT_EXTENDED_SPEED_CAP);
+
+ ext_speed->advertised_speeds = 0;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED)
+ ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G)
+ ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G)
+ ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G)
+ ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G)
+ ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G)
+ ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R)
+ ext_speed->advertised_speeds |=
+ QED_EXT_SPEED_MASK_50G_R;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2)
+ ext_speed->advertised_speeds |=
+ QED_EXT_SPEED_MASK_50G_R2;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2)
+ ext_speed->advertised_speeds |=
+ QED_EXT_SPEED_MASK_100G_R2;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4)
+ ext_speed->advertised_speeds |=
+ QED_EXT_SPEED_MASK_100G_R4;
+ if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4)
+ ext_speed->advertised_speeds |=
+ QED_EXT_SPEED_MASK_100G_P4;
+
+ link_temp = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ extended_fec_mode));
+ link->ext_fec_mode = link_temp;
+
+ p_caps->default_ext_speed_caps = ext_speed->advertised_speeds;
+ p_caps->default_ext_speed = ext_speed->forced_speed;
+ p_caps->default_ext_autoneg = ext_speed->autoneg;
+ p_caps->default_ext_fec = link->ext_fec_mode;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n",
+ ext_speed->forced_speed,
+ ext_speed->advertised_speeds, ext_speed->autoneg,
+ p_caps->default_ext_fec);
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n",
+ link->speed.forced_speed, link->speed.advertised_speeds,
+ link->speed.autoneg, link->pause.autoneg,
+ p_caps->default_eee, p_caps->eee_lpi_timer,
+ p_caps->fec_default);
+
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ /* Read Multi-function information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, generic_cont0);
+
+ generic_cont0 = qed_rd(p_hwfn, p_ptt, addr);
+
+ mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
+ NVM_CFG1_GLOB_MF_MODE_OFFSET;
+
+ switch (mf_mode) {
+ case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_UFP:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_UFP_SPECIFIC) |
+ BIT(QED_MF_8021Q_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_BD:
+ cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_8021AD_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
+ cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_LL2_NON_UNICAST) |
+ BIT(QED_MF_INTER_PF_SWITCH) |
+ BIT(QED_MF_DISABLE_ARFS);
+ break;
+ case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
+ cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
+ BIT(QED_MF_LLH_PROTO_CLSS) |
+ BIT(QED_MF_LL2_NON_UNICAST);
+ if (QED_IS_BB(p_hwfn->cdev))
+ cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF);
+ break;
+ }
+
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ cdev->mf_bits);
+
+ /* In CMT the PF is unknown when the GFS block processes the
+ * packet. Therefore the searcher cannot be used, as it has a
+ * per-PF database, and thus ARFS must be disabled.
+ */
+ if (QED_IS_CMT(cdev))
+ cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS);
+ }
+
+ DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n",
+ p_hwfn->cdev->mf_bits);
+
+ /* Read device capabilities information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, device_capabilities);
+
+ device_capabilities = qed_rd(p_hwfn, p_ptt, addr);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
+ __set_bit(QED_DEV_CAP_ETH,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE)
+ __set_bit(QED_DEV_CAP_FCOE,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ __set_bit(QED_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ __set_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
+
+ /* Read device serial number information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, serial_number);
+
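+	/* The part number spans four consecutive dwords in shmem */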
+ for (i = 0; i < 4; i++)
+ p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
+
+ return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
+}
+
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+ * '0' only for enabled functions (function 0 always exists and is
+ * enabled).
+ * In case of CMT, only the "even" functions are enabled, and thus the
+ * number of functions for both hwfns is learnt from the same bits.
+ */
+ reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+ if (reg_function_hide & 0x1) {
+ if (QED_IS_BB(cdev)) {
+ if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+ } else {
+ num_funcs = 1;
+ eng_mask = 0xfffe;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id,
+ p_hwfn->abs_pf_id,
+ p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine);
+}
+
+static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 addr, global_offsize, global_addr, port_mode;
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ /* In CMT there is always only one port */
+ if (cdev->num_hwfns > 1) {
+ cdev->num_ports_in_engine = 1;
+ cdev->num_ports = 1;
+ return;
+ }
+
+ /* Determine the number of ports per engine */
+ port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE);
+ switch (port_mode) {
+ case 0x0:
+ cdev->num_ports_in_engine = 1;
+ break;
+ case 0x1:
+ cdev->num_ports_in_engine = 2;
+ break;
+ case 0x2:
+ cdev->num_ports_in_engine = 4;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode);
+ cdev->num_ports_in_engine = 1; /* Default to something */
+ break;
+ }
+
+ /* Get the total number of ports of the device */
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + offsetof(struct public_global, max_ports);
+ cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr);
+}
+
+static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_link_capabilities *p_caps;
+ u32 eee_status;
+
+ p_caps = &p_hwfn->mcp_info->link_capabilities;
+ if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED)
+ return;
+
+ p_caps->eee_speed_caps = 0;
+ eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, eee_status));
+ eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >>
+ EEE_SUPPORTED_SPEED_OFFSET;
+
+ if (eee_status & EEE_1G_SUPPORTED)
+ p_caps->eee_speed_caps |= QED_EEE_1G_ADV;
+ if (eee_status & EEE_10G_ADV)
+ p_caps->eee_speed_caps |= QED_EEE_10G_ADV;
+}
+
+static int
+qed_get_hw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality personality)
+{
+ int rc;
+
+ /* Since all the information is common, only the first hwfn should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_hw_info_port_num(p_hwfn, p_ptt);
+
+ qed_mcp_get_capabilities(p_hwfn, p_ptt);
+
+ qed_hw_get_nvm_info(p_hwfn, p_ptt);
+
+ rc = qed_int_igu_read_cam(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (qed_mcp_is_init(p_hwfn))
+ ether_addr_copy(p_hwfn->hw_info.hw_mac_addr,
+ p_hwfn->mcp_info->func_info.mac);
+ else
+ eth_random_addr(p_hwfn->hw_info.hw_mac_addr);
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET)
+ p_hwfn->hw_info.ovlan =
+ p_hwfn->mcp_info->func_info.ovlan;
+
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+
+ qed_get_eee_caps(p_hwfn, p_ptt);
+
+ qed_mcp_read_ufp_config(p_hwfn, p_ptt);
+ }
+
+ if (qed_mcp_is_init(p_hwfn)) {
+ enum qed_pci_personality protocol;
+
+ protocol = p_hwfn->mcp_info->func_info.protocol;
+ p_hwfn->hw_info.personality = protocol;
+ }
+
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ p_hwfn->hw_info.multi_tc_roce_en = true;
+
+ p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2;
+ p_hwfn->hw_info.num_active_tc = 1;
+
+ qed_get_num_funcs(p_hwfn, p_ptt);
+
+ if (qed_mcp_is_init(p_hwfn))
+ p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu;
+
+ return qed_hw_get_resc(p_hwfn, p_ptt);
+}
+
+static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u16 device_id_mask;
+ u32 tmp;
+
+ /* Read Vendor Id / Device Id */
+ pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id);
+ pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id);
+
+ /* Determine type */
+ device_id_mask = cdev->device_id & QED_DEV_ID_MASK;
+ switch (device_id_mask) {
+ case QED_DEV_ID_MASK_BB:
+ cdev->type = QED_DEV_TYPE_BB;
+ break;
+ case QED_DEV_ID_MASK_AH:
+ cdev->type = QED_DEV_TYPE_AH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id);
+ return -EBUSY;
+ }
+
+ cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM);
+ cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV);
+
+ MASK_FIELD(CHIP_REV, cdev->chip_rev);
+
+ /* Learn number of HW-functions */
+ tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR);
+
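+	/* A set pairing bit for this PF means the device operates in CMT
+	 * mode, i.e. it exposes two hw-functions
+	 */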
+ if (tmp & (1 << p_hwfn->rel_pf_id)) {
+ DP_NOTICE(cdev->hwfns, "device in CMT mode\n");
+ cdev->num_hwfns = 2;
+ } else {
+ cdev->num_hwfns = 1;
+ }
+
+ cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt,
+ MISCS_REG_CHIP_TEST_REG) >> 4;
+ MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id);
+ cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL);
+ MASK_FIELD(CHIP_METAL, cdev->chip_metal);
+
+ DP_INFO(cdev->hwfns,
+ "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n",
+ QED_IS_BB(cdev) ? "BB" : "AH",
+ 'A' + cdev->chip_rev,
+ (int)cdev->chip_metal,
+ cdev->chip_num, cdev->chip_rev,
+ cdev->chip_bond_id, cdev->chip_metal);
+
+ return 0;
+}
+
+static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
+ void __iomem *p_regview,
+ void __iomem *p_doorbells,
+ u64 db_phys_addr,
+ enum qed_pci_personality personality)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc = 0;
+
+ /* Split PCI bars evenly between hwfns */
+ p_hwfn->regview = p_regview;
+ p_hwfn->doorbells = p_doorbells;
+ p_hwfn->db_phys_addr = db_phys_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_prepare(p_hwfn);
+
+ /* Validate that chip access is feasible */
+ if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
+ DP_ERR(p_hwfn,
+ "Reading the ME register returns all Fs; Preventing further chip access\n");
+ return -EINVAL;
+ }
+
+ get_function_id(p_hwfn);
+
+ /* Allocate PTT pool */
+ rc = qed_ptt_pool_alloc(p_hwfn);
+ if (rc)
+ goto err0;
+
+ /* Allocate the main PTT */
+ p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
+
+ /* First hwfn learns basic information, e.g., number of hwfns */
+ if (!p_hwfn->my_id) {
+ rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ goto err1;
+ }
+
+ qed_hw_hwfn_prepare(p_hwfn);
+
+ /* Initialize MCP structure */
+ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed initializing mcp command\n");
+ goto err1;
+ }
+
+ /* Read the device configuration information from the HW and SHMEM */
+ rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get HW information\n");
+ goto err2;
+ }
+
+ /* Sending a mailbox to the MFW should be done after qed_get_hw_info()
+ * is called, as it sets the number of ports in an engine.
+ */
+ if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
+ rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+ }
+
+ /* NVRAM info initialization and population */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to populate nvm info shadow\n");
+ goto err2;
+ }
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = qed_init_alloc(p_hwfn);
+ if (rc)
+ goto err3;
+
+ return rc;
+err3:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_mcp_nvm_info_free(p_hwfn);
+err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_iov_free_hw_info(p_hwfn->cdev);
+ qed_mcp_free(p_hwfn);
+err1:
+ qed_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
+
+ /* Store the precompiled init data ptrs */
+ if (IS_PF(cdev))
+ qed_init_iro_array(cdev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(p_hwfn,
+ cdev->regview,
+ cdev->doorbells,
+ cdev->db_phys_addr,
+ personality);
+ if (rc)
+ return rc;
+
+ personality = p_hwfn->hw_info.personality;
+
+ /* Initialize the rest of the hwfns */
+ if (cdev->num_hwfns > 1) {
+ void __iomem *p_regview, *p_doorbell;
+ u64 db_phys_addr;
+ u32 offset;
+
+ /* adjust bar offset for second engine */
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ p_regview = cdev->regview + offset;
+
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
+
+ p_doorbell = cdev->doorbells + offset;
+
+ db_phys_addr = cdev->db_phys_addr + offset;
+
+ /* prepare second hw function */
+ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
+ p_doorbell, db_phys_addr,
+ personality);
+
+ /* In case of error, we need to free the previously
+ * initialized hwfn 0.
+ */
+ if (rc) {
+ if (IS_PF(cdev)) {
+ qed_init_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ }
+ }
+ }
+
+ return rc;
+}
+
+void qed_hw_remove(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int i;
+
+ if (IS_PF(cdev))
+ qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt,
+ QED_OV_DRIVER_STATE_NOT_LOADED);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_release(p_hwfn);
+ continue;
+ }
+
+ qed_init_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ }
+
+ qed_iov_free_hw_info(cdev);
+
+ qed_mcp_nvm_info_free(p_hwfn);
+}
+
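+/* The qed_fw_* helpers below translate an ID that is relative to this
+ * hwfn into the absolute, per-engine ID used by the firmware.
+ * Illustrative example (hypothetical resource split): with
+ * RESC_START = 64 and RESC_NUM = 16 for QED_L2_QUEUE, relative queue 3
+ * maps to absolute queue 67, while relative queue 16 is rejected with
+ * -EINVAL.
+ */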
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ u16 min, max;
+
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ DP_NOTICE(p_hwfn,
+ "l2_queue id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id;
+
+ return 0;
+}
+
+int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_VPORT);
+ max = min + RESC_NUM(p_hwfn, QED_VPORT);
+ DP_NOTICE(p_hwfn,
+ "vport id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id;
+
+ return 0;
+}
+
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id)
+{
+ if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) {
+ u8 min, max;
+
+ min = (u8)RESC_START(p_hwfn, QED_RSS_ENG);
+ max = min + RESC_NUM(p_hwfn, QED_RSS_ENG);
+ DP_NOTICE(p_hwfn,
+ "rss_eng id [%d] is not valid, available indices [%d - %d]\n",
+ src_id, min, max);
+
+ return -EINVAL;
+ }
+
+ *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id;
+
+ return 0;
+}
+
+static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 hw_addr, void *p_eth_qzone,
+ size_t eth_qzone_size, u8 timeset)
+{
+ struct coalescing_timeset *p_coal_timeset;
+
+ if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
+ return -EINVAL;
+ }
+
+ p_coal_timeset = p_eth_qzone;
+ memset(p_eth_qzone, 0, eth_qzone_size);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+ return 0;
+}
+
+int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle)
+{
+ struct qed_queue_cid *p_cid = p_handle;
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_hwfn = p_cid->p_owner;
+
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid);
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (rx_coal) {
+ rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
+ }
+
+ if (tx_coal) {
+ rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid);
+ if (rc)
+ goto out;
+ p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 coalesce, struct qed_queue_cid *p_cid)
+{
+ struct ustorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
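+ /* Worked example (illustrative): coalesce = 300 usec falls in the
+ * 0x100-0x1FF range, so timer_res = 2 and timeset = 300 >> 2 = 75;
+ * the effective value programmed is 75 << 2 = 300 usec.
+ */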
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->sb_igu_id, false);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+out:
+ return rc;
+}
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 coalesce, struct qed_queue_cid *p_cid)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->sb_igu_id, true);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+out:
+ return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
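+/* Illustrative example (assuming QED_WFQ_UNIT is 100): with
+ * min_pf_rate = 10000 Mbps, a vport whose min_speed is 2000 Mbps is
+ * assigned wfq = (2000 * 100) / 10000 = 20, i.e. roughly 20% of the
+ * PF minimum rate.
+ */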
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
+ min_pf_rate;
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].wfq);
+ }
+}
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+ u32 min_pf_rate)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ * rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
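+/* Numeric sketch of the checks above (illustrative values): with
+ * min_pf_rate = 10000 Mbps and QED_WFQ_UNIT = 100, every vport must end
+ * up with at least 10000 / 100 = 100 Mbps, so a req_rate of 50 Mbps is
+ * rejected, and the sum of all requested min rates must not exceed
+ * 10000 Mbps.
+ */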
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate, u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ if (num_vports < 2) {
+ DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports);
+ return -EINVAL;
+ }
+
+ /* Accounting for the vports which are configured for WFQ explicitly */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) &&
+ p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ if (num_vports > QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ QED_WFQ_UNIT);
+ return -EINVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ total_left_rate = min_pf_rate - total_req_min_rate;
+
+ left_rate_per_vp = total_left_rate / non_requested_count;
+ if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return -EINVAL;
+ }
+
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return 0;
+}
+
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+ struct qed_mcp_link_state *p_link;
+ int rc = 0;
+
+ p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (!rc)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = 0;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (!rc && use_wfq)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for qed clients to configure vport min rate.
+ * vp_id - vport id within the PF, in the range [0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps to be assigned to the given vport.
+ */
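+/* For example (illustrative), qed_configure_vport_wfq(cdev, 2, 1000)
+ * requests a guaranteed minimum of 1000 Mbps for vport 2; as checked
+ * below, the call is rejected on multi-hwfn devices.
+ */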
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+ int i, rc = -EINVAL;
+
+ /* Currently not supported; Might change in future */
+ if (cdev->num_hwfns > 1) {
+ DP_NOTICE(cdev,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
+ struct qed_ptt *p_ptt, u32 min_pf_rate)
+{
+ int i;
+
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt,
+ min_pf_rate);
+ }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+ /* Since the limiter also affects Tx-switched traffic, we don't want it
+ * to limit such traffic in case there's no actual limit.
+ * In that case, set limit to imaginary high boundary.
+ */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
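+ /* e.g. (illustrative): with line_speed = 25000 Mbps and max_bw = 40,
+ * pf_rl becomes 10000 Mbps; with max_bw = 100 the limiter is opened
+ * wide to the 100000 Mbps boundary set just above.
+ */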
+
+ rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ memset(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
+
+int qed_device_num_ports(struct qed_dev *cdev)
+{
+ return cdev->num_ports;
+}
+
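+/* Each __le16 firmware word holds two MAC bytes with their order swapped;
+ * e.g. (illustrative) for MAC aa:bb:cc:dd:ee:ff the bytes stored in
+ * fw_msb/fw_mid/fw_lsb are {bb,aa}, {dd,cc} and {ff,ee} respectively.
+ */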
+void qed_set_fw_mac_addr(__le16 *fw_msb,
+ __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
+{
+ ((u8 *)fw_msb)[0] = mac[1];
+ ((u8 *)fw_msb)[1] = mac[0];
+ ((u8 *)fw_mid)[0] = mac[3];
+ ((u8 *)fw_mid)[1] = mac[2];
+ ((u8 *)fw_lsb)[0] = mac[5];
+ ((u8 *)fw_lsb)[1] = mac[4];
+}
+
+static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_llh_info *p_llh_info = cdev->p_llh_info;
+ struct qed_llh_filter_info *p_filters;
+ int rc;
+
+ rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all");
+ if (rc)
+ return rc;
+
+ p_filters = p_llh_info->pp_filters[ppfid];
+ memset(p_filters, 0, NIG_REG_LLH_FUNC_FILTER_EN_SIZE *
+ sizeof(*p_filters));
+
+ return 0;
+}
+
+static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u8 filter_idx, abs_ppfid;
+ int rc = 0;
+
+ if (!p_ptt)
+ return;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ goto out;
+
+ rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid);
+ if (rc)
+ goto out;
+
+ rc = qed_llh_shadow_remove_all_filters(cdev, ppfid);
+ if (rc)
+ goto out;
+
+ for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE;
+ filter_idx++) {
+ rc = qed_llh_remove_filter(p_hwfn, p_ptt,
+ abs_ppfid, filter_idx);
+ if (rc)
+ goto out;
+ }
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+}
+
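+/* The wrappers below operate on the default filter bank (ppfid 0). For
+ * instance (illustrative), a storage offload driver could steer traffic
+ * arriving from TCP source port 3260 with:
+ *   qed_llh_add_src_tcp_port_filter(cdev, 3260);
+ */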
+int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ src_port, QED_LLH_DONT_CARE);
+}
+
+int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ return qed_llh_add_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port)
+{
+ qed_llh_remove_protocol_filter(cdev, 0,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_DONT_CARE, dest_port);
+}
+
+void qed_llh_clear_all_filters(struct qed_dev *cdev)
+{
+ u8 ppfid;
+
+ if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) &&
+ !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits))
+ return;
+
+ for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++)
+ qed_llh_clear_ppfid_filters(cdev, ppfid);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
new file mode 100644
index 000000000..94d4f9413
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -0,0 +1,529 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_DEV_API_H
+#define _QED_DEV_API_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_if.h>
+#include "qed_int.h"
+
+/**
+ * qed_init_dp(): Initialize the debug level.
+ *
+ * @cdev: Qed dev pointer.
+ * @dp_module: Module debug parameter.
+ * @dp_level: Module debug level.
+ *
+ * Return: Void.
+ */
+void qed_init_dp(struct qed_dev *cdev,
+ u32 dp_module,
+ u8 dp_level);
+
+/**
+ * qed_init_struct(): Initialize the device structure to
+ * its defaults.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_init_struct(struct qed_dev *cdev);
+
+/**
+ * qed_resc_free(): Free device resources.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_resc_free(struct qed_dev *cdev);
+
+/**
+ * qed_resc_alloc(): Alloc device resources.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_resc_alloc(struct qed_dev *cdev);
+
+/**
+ * qed_resc_setup(): Setup device resources.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_resc_setup(struct qed_dev *cdev);
+
+enum qed_override_force_load {
+ QED_OVERRIDE_FORCE_LOAD_NONE,
+ QED_OVERRIDE_FORCE_LOAD_ALWAYS,
+ QED_OVERRIDE_FORCE_LOAD_NEVER,
+};
+
+struct qed_drv_load_params {
+ /* Indicates whether the driver is running over a crash kernel.
+ * As part of the load request, this will be used for providing the
+ * driver role to the MFW.
+ * In case of a crash kernel over PDA - this should be set to false.
+ */
+ bool is_crash_kernel;
+
+ /* The timeout value that the MFW should use when locking the engine for
+ * the driver load process.
+ * A value of '0' means the default value, and '255' means no timeout.
+ */
+ u8 mfw_timeout_val;
+#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define QED_LOAD_REQ_LOCK_TO_NONE 255
+
+ /* Avoid engine reset when first PF loads on it */
+ bool avoid_eng_reset;
+
+ /* Allow overriding the default force load behavior */
+ enum qed_override_force_load override_force_load;
+};
+
+struct qed_hw_init_params {
+ /* Tunneling parameters */
+ struct qed_tunnel_info *p_tunn;
+
+ bool b_hw_start;
+
+ /* Interrupt mode [msix, inta, etc.] to use */
+ enum qed_int_mode int_mode;
+
+ /* Whether NPAR tx switching should be used for the vports */
+ bool allow_npar_tx_switch;
+
+ /* Binary fw data pointer in binary fw file */
+ const u8 *bin_fw_data;
+
+ /* Driver load parameters */
+ struct qed_drv_load_params *p_drv_load_params;
+};
+
+/**
+ * qed_hw_init(): Init Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
+ * @p_params: Pointers to params.
+ *
+ * Return: Int.
+ */
+int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
+
+/**
+ * qed_hw_timers_stop_all(): Stop the timers HW block.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: void.
+ */
+void qed_hw_timers_stop_all(struct qed_dev *cdev);
+
+/**
+ * qed_hw_stop(): Stop Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: int.
+ */
+int qed_hw_stop(struct qed_dev *cdev);
+
+/**
+ * qed_hw_stop_fastpath(): Should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_hw_stop_fastpath(struct qed_dev *cdev);
+
+/**
+ * qed_hw_start_fastpath(): Restart fastpath traffic,
+ * only if hw_stop_fastpath was called.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_hw_prepare(): Prepare Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
+ * @personality: Personality to initialize.
+ *
+ * Return: Int.
+ */
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality);
+
+/**
+ * qed_hw_remove(): Remove Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_hw_remove(struct qed_dev *cdev);
+
+/**
+ * qed_ptt_acquire(): Allocate a PTT window.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: struct qed_ptt.
+ *
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function).
+ */
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
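+/* Typical acquire/release pattern (illustrative):
+ *
+ *   p_ptt = qed_ptt_acquire(p_hwfn);
+ *   if (!p_ptt)
+ *           return -EAGAIN;
+ *   ... access registers through p_ptt ...
+ *   qed_ptt_release(p_hwfn, p_ptt);
+ */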
+
+/**
+ * qed_ptt_acquire_context(): Allocate a PTT window honoring the context
+ * atomicity.
+ *
+ * @p_hwfn: HW device data.
+ * @is_atomic: Hint from the caller - whether the function may sleep or not.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: struct qed_ptt.
+ *
+ * Should be called at the entry point to the driver
+ * (at the beginning of an exported function).
+ */
+struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn,
+ bool is_atomic);
+
+/**
+ * qed_ptt_release(): Release PTT Window.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ *
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
+ */
+void qed_ptt_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+enum qed_dmae_address_type_t {
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_GRC
+};
+
+/**
+ * qed_dmae_host2grc(): Copy data from source addr to
+ * dmae registers using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
+ */
+int
+qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr,
+ u32 grc_addr,
+ u32 size_in_dwords,
+ struct qed_dmae_params *p_params);
+
+/**
+ * qed_dmae_grc2host(): Read data from the dmae data offset
+ * into the destination address using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @dest_addr: Destination Address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
+ */
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params);
+
+/**
+ * qed_dmae_host2host(): Copy data from a source address
+ * to a destination address (for SRIOV) using the given
+ * ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @dest_addr: Destination address.
+ * @size_in_dwords: size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params);
+
+int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
+ struct qed_chain_init_params *params);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
+
+/**
+ * qed_fw_l2_queue(): Get absolute L2 queue ID.
+ *
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
+ *
+ * Return: Int.
+ */
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
+ u16 src_id,
+ u16 *dst_id);
+
+/**
+ * qed_fw_vport(): Get absolute vport ID.
+ *
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
+ *
+ * Return: Int.
+ */
+int qed_fw_vport(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * qed_fw_rss_eng(): Get absolute RSS engine ID.
+ *
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
+ *
+ * Return: Int.
+ */
+int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
+ u8 src_id,
+ u8 *dst_id);
+
+/**
+ * qed_llh_get_num_ppfid(): Return the number of LLH filter
+ * banks allocated to the PF.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: u8 Number of LLH filter banks.
+ */
+u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
+
+enum qed_eng {
+ QED_ENG0,
+ QED_ENG1,
+ QED_BOTH_ENG,
+};
+
+/**
+ * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given
+ * LLH filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @eng: Engine.
+ *
+ * Return: Int.
+ */
+int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
+ u8 ppfid, enum qed_eng eng);
+
+/**
+ * qed_llh_set_roce_affinity(): Set the RoCE engine affinity.
+ *
+ * @cdev: Qed dev pointer.
+ * @eng: Engine.
+ *
+ * Return: Int.
+ */
+int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
+
+/**
+ * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to add.
+ *
+ * Return: Int.
+ */
+int qed_llh_add_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, const u8 mac_addr[ETH_ALEN]);
+
+/**
+ * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Ppfid.
+ * @mac_addr: MAC to remove
+ *
+ * Return: Void.
+ */
+void qed_llh_remove_mac_filter(struct qed_dev *cdev,
+ u8 ppfid, u8 mac_addr[ETH_ALEN]);
+
+enum qed_llh_prot_filter_type_t {
+ QED_LLH_FILTER_ETHERTYPE,
+ QED_LLH_FILTER_TCP_SRC_PORT,
+ QED_LLH_FILTER_TCP_DEST_PORT,
+ QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT,
+ QED_LLH_FILTER_UDP_SRC_PORT,
+ QED_LLH_FILTER_UDP_DEST_PORT,
+ QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT
+};
+
+/**
+ * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the
+ * given filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
+ *
+ * Return: Int.
+ */
+int
+qed_llh_add_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
+
+/**
+ * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from
+ * the given filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to remove.
+ * @dest_port: Destination port to remove.
+ *
+ * Return: Void.
+ */
+void
+qed_llh_remove_protocol_filter(struct qed_dev *cdev,
+ u8 ppfid,
+ enum qed_llh_prot_filter_type_t type,
+ u16 source_port_or_eth_type, u16 dest_port);
+
+/**
+ * qed_final_cleanup(): Cleanup of previous driver remains prior to load.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @id: For PF, engine-relative. For VF, PF-relative.
+ * @is_vf: True iff cleanup is made for a VF.
+ *
+ * Return: Int.
+ */
+int qed_final_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 id, bool is_vf);
+
+/**
+ * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue.
+ *
+ * @p_hwfn: HW device data.
+ * @coal: Store coalesce value read from the hardware.
+ * @handle: P_handle.
+ *
+ * Return: Int.
+ **/
+int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
+
+/**
+ * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and
+ * Tx queues. Coalescing can be set to values up to 511 usec, but with
+ * decreasing accuracy as the value grows [up to an error of 3 usec for
+ * the highest values].
+ * While the API allows setting coalescing per-qid, all queues sharing an
+ * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff]; otherwise the configuration would break.
+ *
+ * @rx_coal: Rx Coalesce value in micro seconds.
+ * @tx_coal: TX Coalesce value in micro seconds.
+ * @p_handle: P_handle.
+ *
+ * Return: Int.
+ **/
+int
+qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
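+/* Example (illustrative, p_cid being a queue handle obtained elsewhere):
+ *   qed_set_queue_coalesce(96, 96, p_cid);
+ * sets both Rx and Tx coalescing to 96 usec; both values sit in the
+ * 0-0x7f range, so the finest timer resolution is used.
+ */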
+
+/**
+ * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_enable: True/False.
+ *
+ * Return: Int.
+ */
+int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_enable);
+
+/**
+ * qed_db_recovery_add(): add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
+ *
+ * Return: Int.
+ */
+int qed_db_recovery_add(struct qed_dev *cdev,
+ void __iomem *db_addr,
+ void *db_data,
+ enum qed_db_rec_width db_width,
+ enum qed_db_rec_space db_space);
+
+/**
+ * qed_db_recovery_del() - remove doorbell information from the doorbell
+ * recovery mechanism. db_data serves as key (db_addr is not unique).
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: doorbell address.
+ * @db_data: address where db_data is stored. Serves as key for the
+ * entry to delete.
+ *
+ * Return: Int.
+ */
+int qed_db_recovery_del(struct qed_dev *cdev,
+ void __iomem *db_addr, void *db_data);
+
+const char *qed_hw_get_resc_name(enum qed_resources res_id);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
new file mode 100644
index 000000000..6bb4e165b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/qed/qed_if.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_devlink.h"
+
+enum qed_devlink_param_id {
+ QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+};
+
+struct qed_fw_fatal_ctx {
+ enum qed_hw_err_type err_type;
+};
+
+int qed_report_fatal_error(struct devlink *devlink, enum qed_hw_err_type err_type)
+{
+ struct qed_devlink *qdl = devlink_priv(devlink);
+ struct qed_fw_fatal_ctx fw_fatal_ctx = {
+ .err_type = err_type,
+ };
+
+ if (qdl->fw_reporter)
+ devlink_health_report(qdl->fw_reporter,
+ "Fatal error occurred", &fw_fatal_ctx);
+
+ return 0;
+}
+
+static int
+qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_fw_fatal_ctx *fw_fatal_ctx = priv_ctx;
+ struct qed_dev *cdev = qdl->cdev;
+ u32 dbg_data_buf_size;
+ u8 *p_dbg_data_buf;
+ int err;
+
+ /* Having a context means this dump request follows a fatal error,
+ * so enable extra debugging while gathering the dump, just in case.
+ */
+ cdev->print_dbg_data = fw_fatal_ctx ? true : false;
+
+ dbg_data_buf_size = qed_dbg_all_data_size(cdev);
+ p_dbg_data_buf = vzalloc(dbg_data_buf_size);
+ if (!p_dbg_data_buf) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for a debug data buffer\n");
+ return -ENOMEM;
+ }
+
+ err = qed_dbg_all_data(cdev, p_dbg_data_buf);
+ if (err) {
+ DP_NOTICE(cdev, "Failed to obtain debug data\n");
+ vfree(p_dbg_data_buf);
+ return err;
+ }
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "dump_data",
+ p_dbg_data_buf, dbg_data_buf_size);
+
+ vfree(p_dbg_data_buf);
+
+ return err;
+}
+
+static int
+qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_dev *cdev = qdl->cdev;
+
+ qed_recovery_process(cdev);
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = qed_fw_fatal_reporter_recover,
+ .dump = qed_fw_fatal_reporter_dump,
+};
+
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 0
+
+void qed_fw_reporters_create(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+
+ dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
+ QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ if (IS_ERR(dl->fw_reporter)) {
+ DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
+ PTR_ERR(dl->fw_reporter));
+ dl->fw_reporter = NULL;
+ }
+}
+
+void qed_fw_reporters_destroy(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+ struct devlink_health_reporter *rep;
+
+ rep = dl->fw_reporter;
+
+ if (!IS_ERR_OR_NULL(rep))
+ devlink_health_reporter_destroy(rep);
+}
+
+static int qed_dl_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ ctx->val.vbool = cdev->iwarp_cmt;
+
+ return 0;
+}
+
+static int qed_dl_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ cdev->iwarp_cmt = ctx->val.vbool;
+
+ return 0;
+}
+
+static const struct devlink_param qed_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ qed_dl_param_get, qed_dl_param_set, NULL),
+};
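+/* The runtime parameter above is exposed through the standard devlink
+ * interface, e.g. (illustrative PCI address):
+ *   devlink dev param set pci/0000:01:00.0 name iwarp_cmt \
+ *           value true cmode runtime
+ */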
+
+static int qed_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qed_dl = devlink_priv(devlink);
+ struct qed_dev *cdev = qed_dl->cdev;
+ struct qed_dev_info *dev_info;
+ char buf[100];
+ int err;
+
+ dev_info = &cdev->common_dev_info;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num));
+ buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0;
+
+ if (buf[0]) {
+ err = devlink_info_board_serial_number_put(req, buf);
+ if (err)
+ return err;
+ }
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_3),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_2),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_1),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_0));
+
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ dev_info->fw_major,
+ dev_info->fw_minor,
+ dev_info->fw_rev,
+ dev_info->fw_eng);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, buf);
+}
+
+static const struct devlink_ops qed_dl_ops = {
+ .info_get = qed_devlink_info_get,
+};
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev)
+{
+ union devlink_param_value value;
+ struct qed_devlink *qdevlink;
+ struct devlink *dl;
+ int rc;
+
+ dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink),
+ &cdev->pdev->dev);
+ if (!dl)
+ return ERR_PTR(-ENOMEM);
+
+ qdevlink = devlink_priv(dl);
+ qdevlink->cdev = cdev;
+
+ rc = devlink_params_register(dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+ if (rc)
+ goto err_unregister;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(dl,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ value);
+
+ cdev->iwarp_cmt = false;
+
+ qed_fw_reporters_create(dl);
+ devlink_register(dl);
+ return dl;
+
+err_unregister:
+ devlink_free(dl);
+
+ return ERR_PTR(rc);
+}
+
+void qed_devlink_unregister(struct devlink *devlink)
+{
+ if (!devlink)
+ return;
+
+ devlink_unregister(devlink);
+ qed_fw_reporters_destroy(devlink);
+
+ devlink_params_unregister(devlink, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.h b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
new file mode 100644
index 000000000..ccc7d1d1b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _QED_DEVLINK_H
+#define _QED_DEVLINK_H
+
+#include <linux/qed/qed_if.h>
+#include <net/devlink.h>
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev);
+void qed_devlink_unregister(struct devlink *devlink);
+
+void qed_fw_reporters_create(struct devlink *devlink);
+void qed_fw_reporters_destroy(struct devlink *devlink);
+
+int qed_report_fatal_error(struct devlink *dl, enum qed_hw_err_type err_type);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
new file mode 100644
index 000000000..04602ac94
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -0,0 +1,1045 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#define __PREVENT_DUMP_MEM_ARR__
+#define __PREVENT_PXP_GLOBAL_WIN__
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_fcoe.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_iro_hsi.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include <linux/qed/qed_fcoe_if.h>
+
+struct qed_fcoe_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+ u8 layer_code;
+
+ dma_addr_t sq_pbl_addr;
+ dma_addr_t sq_curr_page_addr;
+ dma_addr_t sq_next_page_addr;
+ dma_addr_t xferq_pbl_addr;
+ void *xferq_pbl_addr_virt_addr;
+ dma_addr_t xferq_addr[4];
+ void *xferq_addr_virt_addr[4];
+ dma_addr_t confq_pbl_addr;
+ void *confq_pbl_addr_virt_addr;
+ dma_addr_t confq_addr[2];
+ void *confq_addr_virt_addr[2];
+
+ dma_addr_t terminate_params;
+
+ u16 dst_mac_addr_lo;
+ u16 dst_mac_addr_mid;
+ u16 dst_mac_addr_hi;
+ u16 src_mac_addr_lo;
+ u16 src_mac_addr_mid;
+ u16 src_mac_addr_hi;
+
+ u16 tx_max_fc_pay_len;
+ u16 e_d_tov_timer_val;
+ u16 rec_tov_timer_val;
+ u16 rx_max_fc_pay_len;
+ u16 vlan_tag;
+ u16 physical_q0;
+
+ struct fc_addr_nw s_id;
+ u8 max_conc_seqs_c3;
+ struct fc_addr_nw d_id;
+ u8 flags;
+ u8 def_q_idx;
+};
+
+static int
+qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
+ struct fcoe_init_ramrod_params *p_ramrod = NULL;
+ struct fcoe_init_func_ramrod_data *p_data;
+ struct fcoe_conn_context *p_cxt = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_cxt_info cxt_info;
+ u32 dummy_cid;
+ int rc = 0;
+ __le16 tmp;
+ u8 i;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.fcoe_init;
+ p_data = &p_ramrod->init_ramrod_data;
+ fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params;
+
+ /* Sanity */
+ if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. Aborting function start\n",
+ fcoe_pf_params->num_cqs,
+ p_hwfn->hw_info.feat_num[QED_FCOE_CQ]);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu);
+ tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages);
+ p_data->sq_num_pages_in_pbl = tmp;
+
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid);
+ if (rc)
+ goto err;
+
+ cxt_info.iid = dummy_cid;
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n",
+ dummy_cid);
+ goto err;
+ }
+ p_cxt = cxt_info.p_cxt;
+ memset(p_cxt, 0, sizeof(*p_cxt));
+
+ SET_FIELD(p_cxt->tstorm_ag_context.flags3,
+ TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+
+ fcoe_pf_params->dummy_icid = (u16)dummy_cid;
+
+ tmp = cpu_to_le16(fcoe_pf_params->num_tasks);
+ p_data->func_params.num_tasks = tmp;
+ p_data->func_params.log_page_size = fcoe_pf_params->log_page_size;
+ p_data->func_params.debug_mode = fcoe_pf_params->debug_mode;
+
+ DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr,
+ fcoe_pf_params->glbl_q_params_addr);
+
+ tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries);
+ p_data->q_params.cq_num_entries = tmp;
+
+ tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries);
+ p_data->q_params.cmdq_num_entries = tmp;
+
+ p_data->q_params.num_queues = fcoe_pf_params->num_cqs;
+
+ tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+ p_data->q_params.queue_relative_offset = (__force u8)tmp;
+
+ for (i = 0; i < fcoe_pf_params->num_cqs; i++) {
+ tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i));
+ p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp;
+ }
+
+ p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi;
+ p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi;
+
+ p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+
+ DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ],
+ fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
+ p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
+ fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ];
+ tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]);
+ p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp;
+ tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]);
+ p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp;
+
+ DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA],
+ fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
+ p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
+ fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
+ tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]);
+ p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp;
+ tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]);
+ p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp;
+ tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size);
+ p_data->q_params.rq_buffer_size = tmp;
+
+ if (fcoe_pf_params->is_target) {
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
+ } else {
+ SET_FIELD(p_data->q_params.q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ return rc;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
+}
+
+static int
+qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL;
+ struct fcoe_conn_offload_ramrod_data *p_data;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 physical_q0;
+ __le16 tmp;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.fcoe_conn_ofld;
+ p_data = &p_ramrod->offload_ramrod_data;
+
+ /* Transmission PQ is the first of the PF */
+ physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = physical_q0;
+ p_data->physical_q0 = cpu_to_le16(physical_q0);
+
+ p_data->conn_id = cpu_to_le16(p_conn->conn_id);
+ DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr);
+ DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr);
+ DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr);
+ DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr);
+ DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]);
+ DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]);
+
+ DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr);
+ DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]);
+ DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]);
+
+ p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo);
+ p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid);
+ p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi);
+ p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo);
+ p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid);
+ p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi);
+
+ tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len);
+ p_data->tx_max_fc_pay_len = tmp;
+ tmp = cpu_to_le16(p_conn->e_d_tov_timer_val);
+ p_data->e_d_tov_timer_val = tmp;
+ tmp = cpu_to_le16(p_conn->rec_tov_timer_val);
+ p_data->rec_rr_tov_timer_val = tmp;
+ tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len);
+ p_data->rx_max_fc_pay_len = tmp;
+
+ p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag);
+ p_data->s_id.addr_hi = p_conn->s_id.addr_hi;
+ p_data->s_id.addr_mid = p_conn->s_id.addr_mid;
+ p_data->s_id.addr_lo = p_conn->s_id.addr_lo;
+ p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3;
+ p_data->d_id.addr_hi = p_conn->d_id.addr_hi;
+ p_data->d_id.addr_mid = p_conn->d_id.addr_mid;
+ p_data->d_id.addr_lo = p_conn->d_id.addr_lo;
+ p_data->flags = p_conn->flags;
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ SET_FIELD(p_data->flags,
+ FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1);
+ p_data->def_q_idx = p_conn->def_q_idx;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.fcoe_conn_terminate;
+ DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr,
+ p_conn->terminate_params);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u32 active_segs = 0;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_FCOE, &init_data);
+ if (rc)
+ return rc;
+
+ active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK);
+ active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG);
+ qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn **p_out_conn)
+{
+ struct qed_fcoe_conn *p_conn = NULL;
+ void *p_addr;
+ u32 i;
+
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ if (!list_empty(&p_hwfn->p_fcoe_info->free_list))
+ p_conn =
+ list_first_entry(&p_hwfn->p_fcoe_info->free_list,
+ struct qed_fcoe_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+ *p_out_conn = p_conn;
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->xferq_pbl_addr, GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_pbl_xferq;
+ p_conn->xferq_pbl_addr_virt_addr = p_addr;
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->xferq_addr[i], GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_xferq;
+ p_conn->xferq_addr_virt_addr[i] = p_addr;
+
+ p_addr = p_conn->xferq_pbl_addr_virt_addr;
+ ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i];
+ }
+
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->confq_pbl_addr, GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_xferq;
+ p_conn->confq_pbl_addr_virt_addr = p_addr;
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
+ p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_conn->confq_addr[i], GFP_KERNEL);
+ if (!p_addr)
+ goto nomem_confq;
+ p_conn->confq_addr_virt_addr[i] = p_addr;
+
+ p_addr = p_conn->confq_pbl_addr_virt_addr;
+ ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i];
+ }
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+ return 0;
+
+nomem_confq:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_pbl_addr_virt_addr,
+ p_conn->confq_pbl_addr);
+ for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++)
+ if (p_conn->confq_addr_virt_addr[i])
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_addr_virt_addr[i],
+ p_conn->confq_addr[i]);
+nomem_xferq:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_pbl_addr_virt_addr,
+ p_conn->xferq_pbl_addr);
+ for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++)
+ if (p_conn->xferq_addr_virt_addr[i])
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_addr_virt_addr[i],
+ p_conn->xferq_addr[i]);
+nomem_pbl_xferq:
+ kfree(p_conn);
+ return -ENOMEM;
+}
+
+static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn)
+{
+ u32 i;
+
+ if (!p_conn)
+ return;
+
+ if (p_conn->confq_pbl_addr_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_pbl_addr_virt_addr,
+ p_conn->confq_pbl_addr);
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) {
+ if (!p_conn->confq_addr_virt_addr[i])
+ continue;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->confq_addr_virt_addr[i],
+ p_conn->confq_addr[i]);
+ }
+
+ if (p_conn->xferq_pbl_addr_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_pbl_addr_virt_addr,
+ p_conn->xferq_pbl_addr);
+
+ for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) {
+ if (!p_conn->xferq_addr_virt_addr[i])
+ continue;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_conn->xferq_addr_virt_addr[i],
+ p_conn->xferq_addr[i]);
+ }
+ kfree(p_conn);
+}
+
+static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
+}
+
+static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
+}
+
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_fcoe_info *p_fcoe_info;
+
+ /* Allocate the FCoE info struct */
+ p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL);
+ if (!p_fcoe_info) {
+ DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&p_fcoe_info->free_list);
+
+ p_hwfn->p_fcoe_info = p_fcoe_info;
+ return 0;
+}
+
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
+{
+ struct fcoe_task_context *p_task_ctx = NULL;
+ u32 i, lc;
+ int rc;
+
+ spin_lock_init(&p_hwfn->p_fcoe_info->lock);
+ for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) {
+ rc = qed_cxt_get_task_ctx(p_hwfn, i,
+ QED_CTX_WORKING_MEM,
+ (void **)&p_task_ctx);
+ if (rc)
+ continue;
+
+ memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+
+ lc = 0;
+ SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
+ p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc);
+
+ lc = 0;
+ SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1);
+ p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);
+
+ SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ }
+}
+
+void qed_fcoe_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_fcoe_conn *p_conn = NULL;
+
+ if (!p_hwfn->p_fcoe_info)
+ return;
+
+ while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_fcoe_info->free_list,
+ struct qed_fcoe_conn, list_entry);
+ if (!p_conn)
+ break;
+ list_del(&p_conn->list_entry);
+ qed_fcoe_free_connection(p_hwfn, p_conn);
+ }
+
+ kfree(p_hwfn->p_fcoe_info);
+ p_hwfn->p_fcoe_info = NULL;
+}
+
+static int
+qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_in_conn,
+ struct qed_fcoe_conn **p_out_conn)
+{
+ struct qed_fcoe_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+ if (rc)
+ return rc;
+
+ /* Use input connection [if provided] or allocate a new one */
+ if (p_in_conn) {
+ p_conn = p_in_conn;
+ } else {
+ rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn);
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+ return rc;
+ }
+ }
+
+ p_conn->icid = icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_fcoe_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_fcoe_info->lock);
+}
+
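+/* Statistics are kept by the storms in internal RAM; copy them out through
+ * the PTT window at the per-PF offset and fold each hi/lo regpair into a
+ * 64-bit counter with HILO_64_REGPAIR().
+ */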
+static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_fcoe_stats *p_stats)
+{
+ struct fcoe_rx_stat tstats;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt);
+ p_stats->fcoe_rx_data_pkt_cnt =
+ HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt);
+ p_stats->fcoe_rx_xfer_pkt_cnt =
+ HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt);
+ p_stats->fcoe_rx_other_pkt_cnt =
+ HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt);
+
+ p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt);
+ p_stats->fcoe_silent_drop_pkt_rq_full_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt);
+ p_stats->fcoe_silent_drop_pkt_crc_error_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt);
+ p_stats->fcoe_silent_drop_pkt_task_invalid_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt);
+ p_stats->fcoe_silent_drop_total_pkt_cnt =
+ le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt);
+}
+
+static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_fcoe_stats *p_stats)
+{
+ struct fcoe_tx_stat pstats;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt);
+ p_stats->fcoe_tx_data_pkt_cnt =
+ HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt);
+ p_stats->fcoe_tx_xfer_pkt_cnt =
+ HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt);
+ p_stats->fcoe_tx_other_pkt_cnt =
+ HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt);
+}
+
+static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn,
+ struct qed_fcoe_stats *p_stats,
+ bool is_atomic)
+{
+ struct qed_ptt *p_ptt;
+
+ memset(p_stats, 0, sizeof(*p_stats));
+
+ p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats);
+ _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
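+/* Per-connection wrapper used to track acquired connections in the
+ * cdev->connections hash table, keyed by the connection's icid (the handle
+ * handed back to the upper-layer driver).
+ */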
+struct qed_hash_fcoe_con {
+ struct hlist_node node;
+ struct qed_fcoe_conn *con;
+};
+
+static int qed_fill_fcoe_dev_info(struct qed_dev *cdev,
+ struct qed_dev_fcoe_info *info)
+{
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+
+ info->primary_dbq_rq_addr =
+ qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->secondary_bdq_rq_addr =
+ qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+
+ info->wwpn = hwfn->mcp_info->func_info.wwn_port;
+ info->wwnn = hwfn->mcp_info->func_info.wwn_node;
+
+ info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ);
+
+ return rc;
+}
+
+static void qed_register_fcoe_ops(struct qed_dev *cdev,
+ struct qed_fcoe_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.fcoe = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_fcoe_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || (hash_con->con->icid != handle))
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_fcoe_stop(struct qed_dev *cdev)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "fcoe already stopped\n");
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop fcoe - not all connections were returned\n");
+ return -EINVAL;
+ }
+
+ p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev));
+ if (!p_ptt)
+ return -EAGAIN;
+
+ /* Stop the fcoe */
+ rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+ qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt);
+
+ return rc;
+}
+
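+/* Start the FCoE function: send the function-start ramrod, initialize the
+ * connection hash table, and (if requested) hand the caller the task/TID
+ * block layout gathered from the context manager.
+ */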
+static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks)
+{
+ int rc;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "fcoe already started;\n");
+ return 0;
+ }
+
+ rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start fcoe\n");
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (tasks) {
+ struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info),
+ GFP_ATOMIC);
+
+ if (!tid_info) {
+ DP_NOTICE(cdev,
+ "Failed to allocate tasks information\n");
+ qed_fcoe_stop(cdev);
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_fcoe_stop(cdev);
+ kfree(tid_info);
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_FCOE * sizeof(u8 *));
+
+ kfree(tid_info);
+ }
+
+ return 0;
+}
+
+static int qed_fcoe_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_fcoe_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to allocate hashed connection\n");
+ return -ENOMEM;
+ }
+
+ /* Acquire the connection */
+ rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+
+ if (p_doorbell)
+ *p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_fcoe_con *hash_con;
+
+ hash_con = qed_fcoe_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_fcoe_offload_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_fcoe_params_offload *conn_info)
+{
+ struct qed_hash_fcoe_con *hash_con;
+ struct qed_fcoe_conn *con;
+
+ hash_con = qed_fcoe_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->sq_curr_page_addr = conn_info->sq_curr_page_addr;
+ con->sq_next_page_addr = conn_info->sq_next_page_addr;
+ con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len;
+ con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val;
+ con->rec_tov_timer_val = conn_info->rec_tov_timer_val;
+ con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len;
+ con->vlan_tag = conn_info->vlan_tag;
+ con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3;
+ con->flags = conn_info->flags;
+ con->def_q_idx = conn_info->def_q_idx;
+
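+ /* Pack the 6-byte MAC addresses into the hi/mid/lo 16-bit words the
+ * firmware expects (byte 5 lands in the upper half of _hi).
+ */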
+ con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) |
+ conn_info->src_mac[4];
+ con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) |
+ conn_info->src_mac[2];
+ con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) |
+ conn_info->src_mac[0];
+ con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) |
+ conn_info->dst_mac[4];
+ con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) |
+ conn_info->dst_mac[2];
+ con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) |
+ conn_info->dst_mac[0];
+
+ con->s_id.addr_hi = conn_info->s_id.addr_hi;
+ con->s_id.addr_mid = conn_info->s_id.addr_mid;
+ con->s_id.addr_lo = conn_info->s_id.addr_lo;
+ con->d_id.addr_hi = conn_info->d_id.addr_hi;
+ con->d_id.addr_mid = conn_info->d_id.addr_mid;
+ con->d_id.addr_lo = conn_info->d_id.addr_lo;
+
+ return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_fcoe_destroy_conn(struct qed_dev *cdev,
+ u32 handle, dma_addr_t terminate_params)
+{
+ struct qed_hash_fcoe_con *hash_con;
+ struct qed_fcoe_conn *con;
+
+ hash_con = qed_fcoe_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Record the termination parameters on the connection */
+ con = hash_con->con;
+ con->terminate_params = terminate_params;
+
+ return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_fcoe_stats_context(struct qed_dev *cdev,
+ struct qed_fcoe_stats *stats,
+ bool is_atomic)
+{
+ return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
+}
+
+static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats)
+{
+ return qed_fcoe_stats_context(cdev, stats, false);
+}
+
+void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+ struct qed_mcp_fcoe_stats *stats,
+ bool is_atomic)
+{
+ struct qed_fcoe_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+ if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect FCoE statistics\n");
+ return;
+ }
+
+ /* Translate FW statistics into the qed_mcp_fcoe_stats struct */
+ stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt +
+ proto_stats.fcoe_rx_xfer_pkt_cnt +
+ proto_stats.fcoe_rx_other_pkt_cnt;
+ stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt +
+ proto_stats.fcoe_tx_xfer_pkt_cnt +
+ proto_stats.fcoe_tx_other_pkt_cnt;
+ stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt;
+
+ /* Request the protocol driver to fill in the rest */
+ if (cdev->protocol_ops.fcoe && cdev->ops_cookie) {
+ struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe;
+ void *cookie = cdev->ops_cookie;
+
+ if (ops->get_login_failures)
+ stats->login_failure = ops->get_login_failures(cookie);
+ }
+}
+
+static const struct qed_fcoe_ops qed_fcoe_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_fcoe_dev_info,
+ .start = &qed_fcoe_start,
+ .stop = &qed_fcoe_stop,
+ .register_ops = &qed_register_fcoe_ops,
+ .acquire_conn = &qed_fcoe_acquire_conn,
+ .release_conn = &qed_fcoe_release_conn,
+ .offload_conn = &qed_fcoe_offload_conn,
+ .destroy_conn = &qed_fcoe_destroy_conn,
+ .get_stats = &qed_fcoe_stats,
+};
+
+const struct qed_fcoe_ops *qed_get_fcoe_ops(void)
+{
+ return &qed_fcoe_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_fcoe_ops);
+
+void qed_put_fcoe_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_fcoe_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
new file mode 100644
index 000000000..214e8299e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_FCOE_H
+#define _QED_FCOE_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_fcoe_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+struct qed_fcoe_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+};
+
+#if IS_ENABLED(CONFIG_QED_FCOE)
+int qed_fcoe_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_fcoe_setup(struct qed_hwfn *p_hwfn);
+
+void qed_fcoe_free(struct qed_hwfn *p_hwfn);
+/**
+ * qed_get_protocol_stats_fcoe(): Fill the provided struct with
+ *                                FCoE statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller - true if the function must not sleep.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
+void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+ struct qed_mcp_fcoe_stats *stats,
+ bool is_atomic);
+#else /* CONFIG_QED_FCOE */
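+/* Stubs for builds without FCoE support: allocation reports -EINVAL and the
+ * remaining entry points become no-ops, so callers need no ifdefs.
+ */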
+static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev,
+ struct qed_mcp_fcoe_stats *stats,
+ bool is_atomic)
+{
+}
+#endif /* CONFIG_QED_FCOE */
+
+#endif /* _QED_FCOE_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
new file mode 100644
index 000000000..ed1a84542
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -0,0 +1,10932 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_HSI_H
+#define _QED_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/fcoe_common.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/iscsi_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/iwarp_common.h>
+#include <linux/qed/rdma_common.h>
+#include <linux/qed/roce_common.h>
+#include <linux/qed/qed_fcoe_if.h>
+
+struct qed_hwfn;
+struct qed_ptt;
+
+/* Opcodes for the event ring */
+enum common_event_opcode {
+ COMMON_EVENT_PF_START,
+ COMMON_EVENT_PF_STOP,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_FW_ERROR,
+ COMMON_EVENT_RL_UPDATE,
+ COMMON_EVENT_EMPTY,
+ MAX_COMMON_EVENT_OPCODE
+};
+
+/* Common Ramrod Command IDs */
+enum common_ramrod_cmd_id {
+ COMMON_RAMROD_UNUSED,
+ COMMON_RAMROD_PF_START,
+ COMMON_RAMROD_PF_STOP,
+ COMMON_RAMROD_VF_START,
+ COMMON_RAMROD_VF_STOP,
+ COMMON_RAMROD_PF_UPDATE,
+ COMMON_RAMROD_RL_UPDATE,
+ COMMON_RAMROD_EMPTY,
+ MAX_COMMON_RAMROD_CMD_ID
+};
+
+/* How ll2 should deal with packet upon errors */
+enum core_error_handle {
+ LL2_DROP_PACKET,
+ LL2_DO_NOTHING,
+ LL2_ASSERT,
+ MAX_CORE_ERROR_HANDLE
+};
+
+/* Opcodes for the event ring */
+enum core_event_opcode {
+ CORE_EVENT_TX_QUEUE_START,
+ CORE_EVENT_TX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_START,
+ CORE_EVENT_RX_QUEUE_STOP,
+ CORE_EVENT_RX_QUEUE_FLUSH,
+ CORE_EVENT_TX_QUEUE_UPDATE,
+ CORE_EVENT_QUEUE_STATS_QUERY,
+ MAX_CORE_EVENT_OPCODE
+};
+
+/* The L4 pseudo checksum mode for Core */
+enum core_l4_pseudo_checksum_mode {
+ CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH,
+ CORE_L4_PSEUDO_CSUM_ZERO_LENGTH,
+ MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
+};
+
+/* LL2 SP error code */
+enum core_ll2_error_code {
+ LL2_OK = 0,
+ LL2_ERROR,
+ MAX_CORE_LL2_ERROR_CODE
+};
+
+/* Light-L2 (LL2) per-port statistics */
+struct core_ll2_port_stats {
+ struct regpair gsi_invalid_hdr;
+ struct regpair gsi_invalid_pkt_length;
+ struct regpair gsi_unsupported_pkt_typ;
+ struct regpair gsi_crcchksm_error;
+};
+
+/* LL2 TX Per Queue Stats */
+struct core_ll2_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+/* Light-L2 RX Producers in Tstorm RAM */
+struct core_ll2_rx_prod {
+ __le16 bd_prod;
+ __le16 cqe_prod;
+};
+
+struct core_ll2_tstorm_per_queue_stat {
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+};
+
+struct core_ll2_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+struct core_ll2_rx_per_queue_stat {
+ struct core_ll2_tstorm_per_queue_stat tstorm_stat;
+ struct core_ll2_ustorm_per_queue_stat ustorm_stat;
+};
+
+struct core_ll2_tx_per_queue_stat {
+ struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+};
+
+/* Structure for doorbell data, in PWM mode, for RX producers update. */
+struct core_pwm_prod_update_data {
+ __le16 icid; /* internal CID */
+ u8 reserved0;
+ u8 params;
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3
+#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */
+#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2
+ struct core_ll2_rx_prod prod; /* Producers */
+};
+
+/* Ramrod data for rx/tx queue statistics query ramrod */
+struct core_queue_stats_query_ramrod_data {
+ u8 rx_stat;
+ u8 tx_stat;
+ __le16 reserved[3];
+ struct regpair rx_stat_addr;
+ struct regpair tx_stat_addr;
+};
+
+/* Core Ramrod Command IDs (light L2) */
+enum core_ramrod_cmd_id {
+ CORE_RAMROD_UNUSED,
+ CORE_RAMROD_RX_QUEUE_START,
+ CORE_RAMROD_TX_QUEUE_START,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ CORE_RAMROD_RX_QUEUE_FLUSH,
+ CORE_RAMROD_TX_QUEUE_UPDATE,
+ CORE_RAMROD_QUEUE_STATS_QUERY,
+ MAX_CORE_RAMROD_CMD_ID
+};
+
+/* Core RoCE flavor type for Light L2 */
+enum core_roce_flavor_type {
+ CORE_ROCE,
+ CORE_RROCE,
+ MAX_CORE_ROCE_FLAVOR_TYPE
+};
+
+/* Specifies how ll2 should deal with packets errors: packet_too_big and
+ * no_buff.
+ */
+struct core_rx_action_on_error {
+ u8 error_type;
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3
+#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF
+#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4
+};
+
+/* Core RX BD for Light L2 */
+struct core_rx_bd {
+ struct regpair addr;
+ __le16 reserved[4];
+};
+
+/* Core RX BD with buffer length for Light L2 */
+struct core_rx_bd_with_buff_len {
+ struct regpair addr;
+ __le16 buff_length;
+ __le16 reserved[3];
+};
+
+/* Core RX BD union for Light L2 */
+union core_rx_bd_union {
+ struct core_rx_bd rx_bd;
+ struct core_rx_bd_with_buff_len rx_bd_with_len;
+};
+
+/* Opaque Data for Light L2 RX CQE */
+struct core_rx_cqe_opaque_data {
+ __le32 data[2];
+};
+
+/* Core RX CQE Type for Light L2 */
+enum core_rx_cqe_type {
+ CORE_RX_CQE_ILLEGAL_TYPE,
+ CORE_RX_CQE_TYPE_REGULAR,
+ CORE_RX_CQE_TYPE_GSI_OFFLOAD,
+ CORE_RX_CQE_TYPE_SLOW_PATH,
+ MAX_CORE_RX_CQE_TYPE
+};
+
+/* Core RX CQE for Light L2 */
+struct core_rx_fast_path_cqe {
+ u8 type;
+ u8 placement_offset;
+ struct parsing_and_err_flags parse_flags;
+ __le16 packet_length;
+ __le16 vlan;
+ struct core_rx_cqe_opaque_data opaque_data;
+ struct parsing_err_flags err_flags;
+ u8 packet_source;
+ u8 reserved0;
+ __le32 reserved1[3];
+};
+
+/* Core Rx CM offload CQE */
+struct core_rx_gsi_offload_cqe {
+ u8 type;
+ u8 data_length_error;
+ struct parsing_and_err_flags parse_flags;
+ __le16 data_length;
+ __le16 vlan;
+ __le32 src_mac_addrhi;
+ __le16 src_mac_addrlo;
+ __le16 qp_id;
+ __le32 src_qp;
+ struct core_rx_cqe_opaque_data opaque_data;
+ u8 packet_source;
+ u8 reserved[3];
+};
+
+/* Core RX CQE for Light L2 */
+struct core_rx_slow_path_cqe {
+ u8 type;
+ u8 ramrod_cmd_id;
+ __le16 echo;
+ struct core_rx_cqe_opaque_data opaque_data;
+ __le32 reserved1[5];
+};
+
+/* Core RX CQE union for Light L2 */
+union core_rx_cqe_union {
+ struct core_rx_fast_path_cqe rx_cqe_fp;
+ struct core_rx_gsi_offload_cqe rx_cqe_gsi;
+ struct core_rx_slow_path_cqe rx_cqe_sp;
+};
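+
+/* The first byte of every CQE variant above is the 'type' discriminator
+ * (enum core_rx_cqe_type), which tells the driver which union member to
+ * interpret for a received completion.
+ */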
+
+/* RX packet source. */
+enum core_rx_pkt_source {
+ CORE_RX_PKT_SOURCE_NETWORK = 0,
+ CORE_RX_PKT_SOURCE_LB,
+ CORE_RX_PKT_SOURCE_TX,
+ CORE_RX_PKT_SOURCE_LL2_TX,
+ MAX_CORE_RX_PKT_SOURCE
+};
+
+/* Ramrod data for rx queue start ramrod */
+struct core_rx_start_ramrod_data {
+ struct regpair bd_base;
+ struct regpair cqe_pbl_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 drop_ttl0_flg;
+ __le16 num_of_pbl_pages;
+ u8 inner_vlan_stripping_en;
+ u8 report_outer_vlan;
+ u8 queue_id;
+ u8 main_func_queue;
+ u8 mf_si_bcast_accept_all;
+ u8 mf_si_mcast_accept_all;
+ struct core_rx_action_on_error action_on_error;
+ u8 gsi_offload_flag;
+ u8 vport_id_valid;
+ u8 vport_id;
+ u8 zero_prod_flg;
+ u8 wipe_inner_vlan_pri_en;
+ u8 reserved[2];
+};
+
+/* Ramrod data for rx queue stop ramrod */
+struct core_rx_stop_ramrod_data {
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 queue_id;
+ u8 reserved1;
+ __le16 reserved2[2];
+};
+
+/* Flags for Core TX BD */
+struct core_tx_bd_data {
+ __le16 as_bitfield;
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0
+#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1
+#define CORE_TX_BD_DATA_START_BD_MASK 0x1
+#define CORE_TX_BD_DATA_START_BD_SHIFT 2
+#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7
+#define CORE_TX_BD_DATA_NBDS_MASK 0xF
+#define CORE_TX_BD_DATA_NBDS_SHIFT 8
+#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1
+#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12
+#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1
+#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1
+#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14
+#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1
+#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15
+};
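+
+/* The *_MASK/*_SHIFT pairs above are consumed through the driver's
+ * SET_FIELD()/GET_FIELD() helpers on a CPU-endian scratch value, which is
+ * then converted to little-endian. Illustrative only ('bd' stands for a
+ * struct core_tx_bd under construction):
+ *
+ *	u16 bd_data = 0;
+ *
+ *	SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 1);
+ *	bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
+ */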
+
+/* Core TX BD for Light L2 */
+struct core_tx_bd {
+ struct regpair addr;
+ __le16 nbytes;
+ __le16 nw_vlan_or_lb_echo;
+ struct core_tx_bd_data bd_data;
+ __le16 bitfield1;
+#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF
+#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0
+#define CORE_TX_BD_TX_DST_MASK 0x3
+#define CORE_TX_BD_TX_DST_SHIFT 14
+};
+
+/* Light L2 TX Destination */
+enum core_tx_dest {
+ CORE_TX_DEST_NW,
+ CORE_TX_DEST_LB,
+ CORE_TX_DEST_RESERVED,
+ CORE_TX_DEST_DROP,
+ MAX_CORE_TX_DEST
+};
+
+/* Ramrod data for tx queue start ramrod */
+struct core_tx_start_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 mtu;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 stats_en;
+ u8 stats_id;
+ u8 conn_type;
+ __le16 pbl_size;
+ __le16 qm_pq_id;
+ u8 gsi_offload_flag;
+ u8 ctx_stats_en;
+ u8 vport_id_valid;
+ u8 vport_id;
+ u8 enforce_security_flag;
+ u8 reserved[7];
+};
+
+/* Ramrod data for tx queue stop ramrod */
+struct core_tx_stop_ramrod_data {
+ __le32 reserved0[2];
+};
+
+/* Ramrod data for tx queue update ramrod */
+struct core_tx_update_ramrod_data {
+ u8 update_qm_pq_id_flg;
+ u8 reserved0;
+ __le16 qm_pq_id;
+ __le32 reserved1[1];
+};
+
+/* Enum flag for what type of dcb data to update */
+enum dcb_dscp_update_mode {
+ DONT_UPDATE_DCB_DSCP,
+ UPDATE_DCB,
+ UPDATE_DSCP,
+ UPDATE_DCB_DSCP,
+ MAX_DCB_DSCP_UPDATE_MODE
+};
+
+/* The core storm context for the Ystorm */
+struct ystorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The core storm context for the Pstorm */
+struct pstorm_core_conn_st_ctx {
+ __le32 reserved[20];
+};
+
+/* Core Slowpath Connection storm context of Xstorm */
+struct xstorm_core_conn_st_ctx {
+ struct regpair spq_base_addr;
+ __le32 reserved0[2];
+ __le16 spq_cons;
+ __le16 reserved1[111];
+};
+
+struct xstorm_core_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 updated_qm_pq_id;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
+};
+
+struct tstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 ll2_rx_prod;
+ __le32 reg10;
+};
+
+struct ustorm_core_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The core storm context for the Mstorm */
+struct mstorm_core_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/* The core storm context for the Ustorm */
+struct ustorm_core_conn_st_ctx {
+ __le32 reserved[20];
+};
+
+/* The core storm context for the Tstorm */
+struct tstorm_core_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* core connection context */
+struct core_conn_context {
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct tstorm_core_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+};
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts;
+ struct regpair vxlan_discard_pkts;
+ struct regpair geneve_discard_pkts;
+ struct regpair lb_discard_pkts;
+};
+
+struct eth_mstorm_per_queue_stat {
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+/* Ethernet TX Per PF */
+struct eth_pstorm_per_pf_stat {
+ struct regpair sent_lb_ucast_bytes;
+ struct regpair sent_lb_mcast_bytes;
+ struct regpair sent_lb_bcast_bytes;
+ struct regpair sent_lb_ucast_pkts;
+ struct regpair sent_lb_mcast_pkts;
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes;
+ struct regpair sent_vxlan_bytes;
+ struct regpair sent_geneve_bytes;
+ struct regpair sent_mpls_bytes;
+ struct regpair sent_gre_mpls_bytes;
+ struct regpair sent_udp_mpls_bytes;
+ struct regpair sent_gre_pkts;
+ struct regpair sent_vxlan_pkts;
+ struct regpair sent_geneve_pkts;
+ struct regpair sent_mpls_pkts;
+ struct regpair sent_gre_mpls_pkts;
+ struct regpair sent_udp_mpls_pkts;
+ struct regpair gre_drop_pkts;
+ struct regpair vxlan_drop_pkts;
+ struct regpair geneve_drop_pkts;
+ struct regpair mpls_drop_pkts;
+ struct regpair gre_mpls_drop_pkts;
+ struct regpair udp_mpls_drop_pkts;
+};
+
+/* Ethernet TX Per Queue Stats */
+struct eth_pstorm_per_queue_stat {
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+/* ETH RX rate limit parameters */
+struct eth_rx_rate_limit {
+ __le16 mult;
+ __le16 cnst;
+ u8 add_sub_cnst;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/* Update RSS indirection table entry command */
+struct eth_tstorm_rss_update_data {
+ u8 vport_id;
+ u8 ind_table_index;
+ __le16 ind_table_value;
+ __le16 reserved1;
+ u8 reserved;
+ u8 valid;
+};
+
+struct eth_ustorm_per_pf_stat {
+ struct regpair rcv_lb_ucast_bytes;
+ struct regpair rcv_lb_mcast_bytes;
+ struct regpair rcv_lb_bcast_bytes;
+ struct regpair rcv_lb_ucast_pkts;
+ struct regpair rcv_lb_mcast_pkts;
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes;
+ struct regpair rcv_vxlan_bytes;
+ struct regpair rcv_geneve_bytes;
+ struct regpair rcv_gre_pkts;
+ struct regpair rcv_vxlan_pkts;
+ struct regpair rcv_geneve_pkts;
+};
+
+struct eth_ustorm_per_queue_stat {
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
+};
+
+/* Event Ring VF-PF Channel data */
+struct vf_pf_channel_eqe_data {
+ struct regpair msg_addr;
+};
+
+/* Event Ring initial cleanup data */
+struct initial_cleanup_eqe_data {
+ u8 vf_id;
+ u8 reserved[7];
+};
+
+/* FW error data */
+struct fw_err_data {
+ u8 recovery_scope;
+ u8 err_id;
+ __le16 entity_id;
+ u8 reserved[4];
+};
+
+/* Event Data Union */
+union event_ring_data {
+ u8 bytes[8];
+ struct vf_pf_channel_eqe_data vf_pf_channel;
+ struct iscsi_eqe_data iscsi_info;
+ struct iscsi_connect_done_results iscsi_conn_done_info;
+ union rdma_eqe_data rdma_data;
+ struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct fw_err_data err_data;
+};
+
+/* Event Ring Entry */
+struct event_ring_entry {
+ u8 protocol_id;
+ u8 opcode;
+ u8 reserved0;
+ u8 vf_id;
+ __le16 echo;
+ u8 fw_return_code;
+ u8 flags;
+#define EVENT_RING_ENTRY_ASYNC_MASK 0x1
+#define EVENT_RING_ENTRY_ASYNC_SHIFT 0
+#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F
+#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1
+ union event_ring_data data;
+};
+
+/* Event Ring Next Page Address */
+struct event_ring_next_addr {
+ struct regpair addr;
+ __le32 reserved[2];
+};
+
+/* Event Ring Element */
+union event_ring_element {
+ struct event_ring_entry entry;
+ struct event_ring_next_addr next_addr;
+};
+
+/* FW flow control mode */
+enum fw_flow_ctrl_mode {
+ flow_ctrl_pause,
+ flow_ctrl_pfc,
+ MAX_FW_FLOW_CTRL_MODE
+};
+
+/* GFT profile type */
+enum gft_profile_type {
+ GFT_PROFILE_TYPE_4_TUPLE,
+ GFT_PROFILE_TYPE_L4_DST_PORT,
+ GFT_PROFILE_TYPE_IP_DST_ADDR,
+ GFT_PROFILE_TYPE_IP_SRC_ADDR,
+ GFT_PROFILE_TYPE_TUNNEL_TYPE,
+ MAX_GFT_PROFILE_TYPE
+};
+
+/* Major and Minor hsi Versions */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2];
+ u8 major_ver_arr[2];
+};
+
+/* Integration Phase */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3,
+ INTEG_PHASE_BB_B0_NO_MCP = 10,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11,
+ MAX_INTEG_PHASE
+};
+
+/* iWARP LL2 TX queues */
+enum iwarp_ll2_tx_queues {
+ IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
+ IWARP_LL2_ALIGNED_TX_QUEUE,
+ IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE,
+ IWARP_LL2_ERROR,
+ MAX_IWARP_LL2_TX_QUEUES
+};
+
+/* Function error ID */
+enum func_err_id {
+ FUNC_NO_ERROR,
+ VF_PF_CHANNEL_NOT_READY,
+ VF_ZONE_MSG_NOT_VALID,
+ VF_ZONE_FUNC_NOT_ENABLED,
+ ETH_PACKET_TOO_SMALL,
+ ETH_ILLEGAL_VLAN_MODE,
+ ETH_MTU_VIOLATION,
+ ETH_ILLEGAL_INBAND_TAGS,
+ ETH_VLAN_INSERT_AND_INBAND_VLAN,
+ ETH_ILLEGAL_NBDS,
+ ETH_FIRST_BD_WO_SOP,
+ ETH_INSUFFICIENT_BDS,
+ ETH_ILLEGAL_LSO_HDR_NBDS,
+ ETH_ILLEGAL_LSO_MSS,
+ ETH_ZERO_SIZE_BD,
+ ETH_ILLEGAL_LSO_HDR_LEN,
+ ETH_INSUFFICIENT_PAYLOAD,
+ ETH_EDPM_OUT_OF_SYNC,
+ ETH_TUNN_IPV6_EXT_NBD_ERR,
+ ETH_CONTROL_PACKET_VIOLATION,
+ ETH_ANTI_SPOOFING_ERR,
+ ETH_PACKET_SIZE_TOO_LARGE,
+ CORE_ILLEGAL_VLAN_MODE,
+ CORE_ILLEGAL_NBDS,
+ CORE_FIRST_BD_WO_SOP,
+ CORE_INSUFFICIENT_BDS,
+ CORE_PACKET_TOO_SMALL,
+ CORE_ILLEGAL_INBAND_TAGS,
+ CORE_VLAN_INSERT_AND_INBAND_VLAN,
+ CORE_MTU_VIOLATION,
+ CORE_CONTROL_PACKET_VIOLATION,
+ CORE_ANTI_SPOOFING_ERR,
+ CORE_PACKET_SIZE_TOO_LARGE,
+ CORE_ILLEGAL_BD_FLAGS,
+ CORE_GSI_PACKET_VIOLATION,
+ MAX_FUNC_ERR_ID
+};
+
+/* FW error handling mode */
+enum fw_err_mode {
+ FW_ERR_FATAL_ASSERT,
+ FW_ERR_DRV_REPORT,
+ MAX_FW_ERR_MODE
+};
+
+/* FW error recovery scope */
+enum fw_err_recovery_scope {
+ ERR_SCOPE_INVALID,
+ ERR_SCOPE_TX_Q,
+ ERR_SCOPE_RX_Q,
+ ERR_SCOPE_QP,
+ ERR_SCOPE_VPORT,
+ ERR_SCOPE_FUNC,
+ ERR_SCOPE_PORT,
+ ERR_SCOPE_ENGINE,
+ MAX_FW_ERR_RECOVERY_SCOPE
+};
+
+/* Mstorm non-triggering VF zone */
+struct mstorm_non_trigger_vf_zone {
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD];
+};
+
+/* Mstorm VF zone */
+struct mstorm_vf_zone {
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* vlan header including TPID and TCI fields */
+struct vlan_header {
+ __le16 tpid;
+ __le16 tci;
+};
+
+/* outer tag configurations */
+struct outer_tag_config_struct {
+ u8 enable_stag_pri_change;
+ u8 pri_map_valid;
+ u8 reserved[2];
+ struct vlan_header outer_tag;
+ u8 inner_to_outer_pri_map[8];
+};
+
+/* personality per PF */
+enum personality_type {
+ BAD_PERSONALITY_TYP,
+ PERSONALITY_TCP_ULP,
+ PERSONALITY_FCOE,
+ PERSONALITY_RDMA_AND_ETH,
+ PERSONALITY_RDMA,
+ PERSONALITY_CORE,
+ PERSONALITY_ETH,
+ PERSONALITY_RESERVED,
+ MAX_PERSONALITY_TYPE
+};
+
+/* tunnel configuration */
+struct pf_start_tunnel_config {
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved[3];
+};
+
+/* Ramrod data for PF start ramrod */
+struct pf_start_ramrod_data {
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_base_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode;
+ u8 integ_phase;
+ u8 allow_npar_tx_switching;
+ u8 reserved0;
+ struct hsi_fp_ver_struct hsi_fp_ver;
+ struct outer_tag_config_struct outer_tag_config;
+ u8 pf_fp_err_mode;
+ u8 consolid_q_num_pages;
+ u8 reserved[6];
+};
+
+/* Data for port update ramrod */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag;
+ u8 dscp_enable_flag;
+ u8 dcb_priority;
+ u8 dcb_tc;
+ u8 dscp_val;
+ u8 dcb_dont_add_vlan0;
+};
+
+/* Update tunnel configuration */
+struct pf_update_tunnel_config {
+ u8 update_rx_pf_clss;
+ u8 update_rx_def_ucast_clss;
+ u8 update_rx_def_non_ucast_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 set_no_inner_l2_vxlan_udp_port_flg;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ u8 reserved;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 no_inner_l2_vxlan_udp_port;
+ __le16 reserved1[3];
+};
+
+/* Data for port update ramrod */
+struct pf_update_ramrod_data {
+ u8 update_eth_dcb_data_mode;
+ u8 update_fcoe_dcb_data_mode;
+ u8 update_iscsi_dcb_data_mode;
+ u8 update_roce_dcb_data_mode;
+ u8 update_rroce_dcb_data_mode;
+ u8 update_iwarp_dcb_data_mode;
+ u8 update_mf_vlan_flag;
+ u8 update_enable_stag_pri_change;
+ struct protocol_dcb_data eth_dcb_data;
+ struct protocol_dcb_data fcoe_dcb_data;
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data;
+ struct protocol_dcb_data rroce_dcb_data;
+ struct protocol_dcb_data iwarp_dcb_data;
+ __le16 mf_vlan;
+ u8 enable_stag_pri_change;
+ u8 reserved;
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Ports mode */
+enum ports_mode {
+ ENGX2_PORTX1,
+ ENGX2_PORTX2,
+ ENGX1_PORTX1,
+ ENGX1_PORTX2,
+ ENGX1_PORTX4,
+ MAX_PORTS_MODE
+};
+
+/* Protocol-common error code */
+enum protocol_common_error_code {
+ COMMON_ERR_CODE_OK = 0,
+ COMMON_ERR_CODE_ERROR,
+ MAX_PROTOCOL_COMMON_ERROR_CODE
+};
+
+/* Used to index the per-protocol entries of hsi_fp_[major|minor]_ver_arr */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
+
+/* RDMA TX Stats */
+struct rdma_sent_stats {
+ struct regpair sent_bytes;
+ struct regpair sent_pkts;
+};
+
+/* Pstorm non-triggering VF zone */
+struct pstorm_non_trigger_vf_zone {
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct rdma_sent_stats rdma_stats;
+};
+
+/* Pstorm VF zone */
+struct pstorm_vf_zone {
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7];
+};
+
+/* Ramrod Header of SPQE */
+struct ramrod_header {
+ __le32 cid;
+ u8 cmd_id;
+ u8 protocol_id;
+ __le16 echo;
+};
+
+/* RDMA RX Stats */
+struct rdma_rcv_stats {
+ struct regpair rcv_bytes;
+ struct regpair rcv_pkts;
+};
+
+/* Data for update QCN/DCQCN RL ramrod */
+struct rl_update_ramrod_data {
+ u8 qcn_update_param_flg;
+ u8 dcqcn_update_param_flg;
+ u8 rl_init_flg;
+ u8 rl_start_flg;
+ u8 rl_stop_flg;
+ u8 rl_id_first;
+ u8 rl_id_last;
+ u8 rl_dc_qcn_flg;
+ u8 dcqcn_reset_alpha_on_idle;
+ u8 rl_bc_stage_th;
+ u8 rl_timer_stage_th;
+ u8 reserved1;
+ __le32 rl_bc_rate;
+ __le16 rl_max_rate;
+ __le16 rl_r_ai;
+ __le16 rl_r_hai;
+ __le16 dcqcn_g;
+ __le32 dcqcn_k_us;
+ __le32 dcqcn_timeuot_us;
+ __le32 qcn_timeuot_us;
+ __le32 reserved2;
+};
+
+/* Slowpath Element (SPQE) */
+struct slow_path_element {
+ struct ramrod_header hdr;
+ struct regpair data_ptr;
+};
+
+/* Tstorm non-triggering VF zone */
+struct tstorm_non_trigger_vf_zone {
+ struct rdma_rcv_stats rdma_stats;
+};
+
+struct tstorm_per_port_stat {
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair ll2_mac_filter_discard;
+ struct regpair ll2_conn_disabled_discard;
+ struct regpair iscsi_irregular_pkt;
+ struct regpair fcoe_irregular_pkt;
+ struct regpair roce_irregular_pkt;
+ struct regpair iwarp_irregular_pkt;
+ struct regpair eth_irregular_pkt;
+ struct regpair toe_irregular_pkt;
+ struct regpair preroce_irregular_pkt;
+ struct regpair eth_gre_tunn_filter_discard;
+ struct regpair eth_vxlan_tunn_filter_discard;
+ struct regpair eth_geneve_tunn_filter_discard;
+ struct regpair eth_gft_drop_pkt;
+};
+
+/* Tstorm VF zone */
+struct tstorm_vf_zone {
+ struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_TUNNEL_CLSS
+};
+
+/* Ustorm non-triggering VF zone */
+struct ustorm_non_trigger_vf_zone {
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr;
+};
+
+/* Ustorm triggering VF zone */
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid;
+ u8 reserved[7];
+};
+
+/* Ustorm VF zone */
+struct ustorm_vf_zone {
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger;
+};
+
+/* VF-PF channel data */
+struct vf_pf_channel_data {
+ __le32 ready;
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/* Ramrod data for VF start ramrod */
+struct vf_start_ramrod_data {
+ u8 vf_id;
+ u8 enable_flr_ack;
+ __le16 opaque_fid;
+ u8 personality;
+ u8 reserved[7];
+ struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+/* Ramrod data for VF stop ramrod */
+struct vf_stop_ramrod_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
+/* VF zone size mode */
+enum vf_zone_size_mode {
+ VF_ZONE_SIZE_MODE_DEFAULT,
+ VF_ZONE_SIZE_MODE_DOUBLE,
+ VF_ZONE_SIZE_MODE_QUAD,
+ MAX_VF_ZONE_SIZE_MODE
+};
+
+/* Xstorm non-triggering VF zone */
+struct xstorm_non_trigger_vf_zone {
+ struct regpair non_edpm_ack_pkts;
+};
+
+/* Xstorm VF zone */
+struct xstorm_vf_zone {
+ struct xstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Attentions status block */
+struct atten_status_block {
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index;
+ __le32 reserved1;
+};
+
+/* DMAE command */
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length_dw;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo;
+ __le32 comp_addr_hi;
+ __le32 comp_val;
+ __le32 crc32;
+ __le32 crc_32_c;
+ __le16 crc16;
+ __le16 crc16_c;
+ __le16 crc10;
+ __le16 error_bit_reserved;
+#define DMAE_CMD_ERROR_BIT_MASK 0x1
+#define DMAE_CMD_ERROR_BIT_SHIFT 0
+#define DMAE_CMD_RESERVED_MASK 0x7FFF
+#define DMAE_CMD_RESERVED_SHIFT 1
+ __le16 xsum16;
+ __le16 xsum8;
+};
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled,
+ dmae_cmd_comp_crc_enabled,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+enum dmae_cmd_comp_func_enum {
+ dmae_cmd_comp_func_to_src,
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled,
+ dmae_cmd_comp_word_enabled,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
+};
+
+enum dmae_cmd_error_handling_enum {
+ dmae_cmd_error_handling_send_regular_comp,
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie,
+ dmae_cmd_src_grc,
+ MAX_DMAE_CMD_SRC_ENUM
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/* DMAE parameters */
+struct qed_dmae_params {
+ u32 flags;
+/* If the QED_DMAE_PARAMS_RW_REPL_SRC flag is set, the source is a
+ * block of length DMAE_MAX_RW_SIZE, and the destination is larger,
+ * then the source block is duplicated as many times as required to
+ * fill the destination block. This is mostly used to write a zeroed
+ * buffer to a destination address using DMA (see the usage sketch
+ * after this structure).
+ */
+#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1
+#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0
+#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1
+#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2
+#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1
+#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3
+#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4
+#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5
+#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1
+#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6
+#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF
+#define QED_DMAE_PARAMS_RESERVED_SHIFT 7
+ u8 src_vfid;
+ u8 dst_vfid;
+ u8 port_id;
+ u8 src_pfid;
+ u8 dst_pfid;
+ u8 reserved1;
+ __le16 reserved2;
+};
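+
+/* Illustrative usage sketch for QED_DMAE_PARAMS_RW_REPL_SRC; this is not part
+ * of the HSI and assumes the qed driver's SET_FIELD() bit-field helper and a
+ * small zeroed source buffer of up to DMAE_MAX_RW_SIZE dwords:
+ *
+ *	struct qed_dmae_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
+ *
+ * The DMAE engine then replicates the small source block until the larger
+ * destination block has been filled.
+ */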
+
+/* IGU cleanup command */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+/* IGU firmware driver command */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
+};
+
+/* IGU firmware driver command */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+/* IGU mapping line structure */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+/* IGU MSIX line structure */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR,
+ TPH_ST_HINT_REQUESTER,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
+
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+/* Physical memory descriptor */
+struct phys_mem_desc {
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ u32 size; /* In bytes */
+};
+
+/* Virtual memory descriptor */
+struct virt_mem_desc {
+ void *ptr;
+ u32 size; /* In bytes */
+};
+
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* BRB RAM init requirements */
+struct init_brb_ram_req {
+ u32 guranteed_per_tc;
+ u32 headroom_per_tc;
+ u32 min_pkt_size;
+ u32 max_ports_per_engine;
+ u8 num_active_tcs[MAX_NUM_PORTS];
+};
+
+/* ETS per-TC init requirements */
+struct init_ets_tc_req {
+ u8 use_sp;
+ u8 use_wfq;
+ u16 weight;
+};
+
+/* ETS init requirements */
+struct init_ets_req {
+ u32 mtu;
+ struct init_ets_tc_req tc_req[NUM_OF_TCS];
+};
+
+/* NIG LB RL init requirements */
+struct init_nig_lb_rl_req {
+ u16 lb_mac_rate;
+ u16 lb_rate;
+ u32 mtu;
+ u16 tc_rate[NUM_OF_PHYS_TCS];
+};
+
+/* NIG TC mapping for each priority */
+struct init_nig_pri_tc_map_entry {
+ u8 tc_id;
+ u8 valid;
+};
+
+/* NIG priority to TC map init requirements */
+struct init_nig_pri_tc_map_req {
+ struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES];
+};
+
+/* QM per global RL init parameters */
+struct init_qm_global_rl_params {
+ u8 type;
+ u8 reserved0;
+ u16 reserved1;
+ u32 rate_limit;
+};
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u16 active_phys_tcs;
+ u16 num_pbf_cmd_lines;
+ u16 num_btb_blocks;
+ u8 active;
+ u8 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u16 vport_id;
+ u16 rl_id;
+ u8 rl_valid;
+ u8 tc_id;
+ u8 wrr_group;
+ u8 port_id;
+};
+
+/* QM per RL init parameters */
+struct init_qm_rl_params {
+ u32 vport_rl;
+ u8 vport_rl_type;
+ u8 reserved[3];
+};
+
+/* QM Rate Limiter types */
+enum init_qm_rl_type {
+ QM_RL_TYPE_NORMAL,
+ QM_RL_TYPE_QCN,
+ MAX_INIT_QM_RL_TYPE
+};
+
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ u16 wfq;
+ u16 reserved;
+ u16 tc_wfq[NUM_OF_TCS];
+ u16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1)
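+
+/* Note: with GRC_ADDR_BITS = 23, MAX_GRC_ADDR = (1 << 23) - 1 = 0x7FFFFF,
+ * matching the 23-bit ADDRESS fields of the init read/write ops below.
+ */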
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+enum chip_ids {
+ CHIP_BB,
+ CHIP_K2,
+ MAX_CHIP_IDS
+};
+
+struct fw_asserts_ram_section {
+ __le16 section_ram_line_offset;
+ __le16 section_ram_line_size;
+ u8 list_dword_offset;
+ u8 list_element_dword_size;
+ u8 list_num_elements;
+ u8 list_next_index_dword_offset;
+};
+
+struct fw_ver_num {
+ u8 major;
+ u8 minor;
+ u8 rev;
+ u8 eng;
+};
+
+struct fw_ver_info {
+ __le16 tools_ver;
+ u8 image_id;
+ u8 reserved1;
+ struct fw_ver_num num;
+ __le32 timestamp;
+ __le32 reserved2;
+};
+
+struct fw_info {
+ struct fw_ver_info ver;
+ struct fw_asserts_ram_section fw_asserts_section;
+};
+
+struct fw_info_location {
+ __le32 grc_addr;
+ __le32 size;
+};
+
+enum init_modes {
+ MODE_BB_A0_DEPRECATED,
+ MODE_BB,
+ MODE_K2,
+ MODE_ASIC,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
+ MODE_SF,
+ MODE_MF_SD,
+ MODE_MF_SI,
+ MODE_PORTS_PER_ENG_1,
+ MODE_PORTS_PER_ENG_2,
+ MODE_PORTS_PER_ENG_4,
+ MODE_100G,
+ MODE_SKIP_PRAM_INIT,
+ MODE_EMUL_MAC,
+ MAX_INIT_MODES
+};
+
+enum init_phases {
+ PHASE_ENGINE,
+ PHASE_PORT,
+ PHASE_PF,
+ PHASE_VF,
+ PHASE_QM_PF,
+ MAX_INIT_PHASES
+};
+
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
+};
+
+/* Binary buffer header */
+struct bin_buffer_hdr {
+ u32 offset;
+ u32 length;
+};
+
+/* Binary init buffer types */
+enum bin_init_buffer_type {
+ BIN_BUF_INIT_FW_VER_INFO,
+ BIN_BUF_INIT_CMD,
+ BIN_BUF_INIT_VAL,
+ BIN_BUF_INIT_MODE_TREE,
+ BIN_BUF_INIT_IRO,
+ BIN_BUF_INIT_OVERLAYS,
+ MAX_BIN_INIT_BUFFER_TYPE
+};
+
+/* FW overlay buffer header */
+struct fw_overlay_buf_hdr {
+ u32 data;
+#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF
+#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF
+#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8
+};
+
+/* init array header: raw */
+struct init_array_raw_hdr {
+ __le32 data;
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+};
+
+/* init array header: standard */
+struct init_array_standard_hdr {
+ __le32 data;
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+};
+
+/* init array header: zipped */
+struct init_array_zipped_hdr {
+ __le32 data;
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+};
+
+/* init array header: pattern */
+struct init_array_pattern_hdr {
+ __le32 data;
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+};
+
+/* init array header union */
+union init_array_hdr {
+ struct init_array_raw_hdr raw;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped;
+ struct init_array_pattern_hdr pattern;
+};
+
+/* init array types */
+enum init_array_types {
+ INIT_ARR_STANDARD,
+ INIT_ARR_ZIPPED,
+ INIT_ARR_PATTERN,
+ MAX_INIT_ARRAY_TYPES
+};
+
+/* init operation: callback */
+struct init_callback_op {
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id;
+ __le16 block_id;
+};
+
+/* init operation: delay */
+struct init_delay_op {
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay;
+};
+
+/* init operation: if_mode */
+struct init_if_mode_op {
+ __le32 op_data;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
+};
+
+/* init operation: if_phase */
+struct init_if_phase_op {
+ __le32 op_data;
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+ __le32 phase_data;
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+};
+
+/* init mode operators */
+enum init_mode_ops {
+ INIT_MODE_OP_NOT,
+ INIT_MODE_OP_OR,
+ INIT_MODE_OP_AND,
+ MAX_INIT_MODE_OPS
+};
+
+/* init operation: raw */
+struct init_raw_op {
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2;
+};
+
+/* init array params */
+struct init_op_array_params {
+ __le16 size;
+ __le16 offset;
+};
+
+/* Write init operation arguments */
+union init_write_args {
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
+};
+
+/* init operation: write */
+struct init_write_op {
+ __le32 data;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args;
+};
+
+/* init operation: read */
+struct init_read_op {
+ __le32 op_data;
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
+ __le32 expected_val;
+};
+
+/* Init operations union */
+union init_op {
+ struct init_raw_op raw;
+ struct init_write_op write;
+ struct init_read_op read;
+ struct init_if_mode_op if_mode;
+ struct init_if_phase_op if_phase;
+ struct init_callback_op callback;
+ struct init_delay_op delay;
+};
+
+/* Init command operation types */
+enum init_op_types {
+ INIT_OP_READ,
+ INIT_OP_WRITE,
+ INIT_OP_IF_MODE,
+ INIT_OP_IF_PHASE,
+ INIT_OP_DELAY,
+ INIT_OP_CALLBACK,
+ MAX_INIT_OP_TYPES
+};
+
+/* init polling types */
+enum init_poll_types {
+ INIT_POLL_NONE,
+ INIT_POLL_EQ,
+ INIT_POLL_OR,
+ INIT_POLL_AND,
+ MAX_INIT_POLL_TYPES
+};
+
+/* init source types */
+enum init_source_types {
+ INIT_SRC_INLINE,
+ INIT_SRC_ZEROS,
+ INIT_SRC_ARRAY,
+ INIT_SRC_RUNTIME,
+ MAX_INIT_SOURCE_TYPES
+};
+
+/* Internal RAM Offsets macro data */
+struct iro {
+ u32 base;
+ u16 m1;
+ u16 m2;
+ u16 m3;
+ u16 size;
+};
+
+/* Win 2 */
+#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
+
+/* Win 3 */
+#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL
+
+/* Win 4 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL
+
+/* Win 5 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL
+
+/* Win 6 */
+#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL
+
+/* Win 7 */
+#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL
+
+/* Win 8 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL
+
+/* Win 9 */
+#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL
+
+/* Win 10 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL
+
+/* Win 11 */
+#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL
+
+/* Win 12 */
+#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL
+
+/* Win 13 */
+#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+
+/* Returns the VOQ based on port and TC */
+#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \
+ PURE_LB_TC ? NUM_OF_PHYS_TCS *\
+ MAX_NUM_PORTS_BB + \
+ (port) : (port) * \
+ (max_phys_tcs_per_port) + (tc))
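+
+/* Worked example (illustrative): with max_phys_tcs_per_port = 4, physical
+ * TC 2 on port 1 resolves to VOQ(1, 2, 4) = 1 * 4 + 2 = 6, while the pure
+ * LB TC of port 1 resolves to NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + 1.
+ */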
+
+struct init_qm_pq_params;
+
+/**
+ * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
+ *
+ * @num_pf_cids: Number of connections used by this PF.
+ * @num_vf_cids: Number of connections used by VFs of this PF.
+ * @num_tids: Number of tasks used by this PF.
+ * @num_pf_pqs: Number of PQs used by this PF.
+ * @num_vf_pqs: Number of PQs used by VFs of this PF.
+ *
+ * Return: The required host memory size in 4KB units.
+ *
+ * Must be called before all QM init HSI functions.
+ */
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
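+
+/* Illustrative sizing sketch (argument names simply reuse the parameters
+ * documented above):
+ *
+ *	u32 qm_ilt_4kb = qed_qm_pf_mem_size(num_pf_cids, num_vf_cids,
+ *					    num_tids, num_pf_pqs, num_vf_pqs);
+ *
+ * The result is in 4KB units, so the byte size of the backing memory is
+ * qm_ilt_4kb * 4096.
+ */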
+
+struct qed_qm_common_rt_init_params {
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool global_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS];
+};
+
+/**
+ * qed_qm_common_rt_init(): Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
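+
+/* Minimal engine-phase setup sketch (illustrative only; the enable flags and
+ * counts below are placeholder values, not recommended settings):
+ *
+ *	struct init_qm_port_params port_params[MAX_NUM_PORTS] = {};
+ *	struct qed_qm_common_rt_init_params params = {
+ *		.max_ports_per_engine = 1,
+ *		.max_phys_tcs_per_port = 4,
+ *		.pf_rl_en = true,
+ *		.pf_wfq_en = true,
+ *		.global_rl_en = true,
+ *		.vport_wfq_en = true,
+ *		.port_params = port_params,
+ *	};
+ *	int rc = qed_qm_common_rt_init(p_hwfn, &params);
+ */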
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_pf_loading;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u16 start_vport;
+ u16 num_vports;
+ u16 start_rl;
+ u16 num_rls;
+ u16 pf_wfq;
+ u32 pf_rl;
+ u32 link_speed;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+ struct init_qm_rl_params *rl_params;
+};
+
+/**
+ * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
+/**
+ * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_wfq: WFQ weight. Must be non-zero.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
+
+/**
+ * qed_init_pf_rl(): Initializes the rate limit of the specified PF
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_rl: Rate limit in Mb/sec units.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
+
+/**
+ * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: An array containing the first Tx PQ ID associated
+ * with the VPORT for each TC. This array is filled by
+ * qed_qm_pf_rt_init().
+ * @wfq: WFQ weight. Must be non-zero.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
+
+/**
+ * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified
+ * VPORT and TC.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC.
+ * (filled by qed_qm_pf_rt_init).
+ * @weight: VPORT+TC WFQ weight.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 weight);
+
+/**
+ * qed_init_global_rl(): Initializes the rate limit of the specified
+ * rate limiter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @rl_id: RL ID.
+ * @rate_limit: Rate limit in Mb/sec units.
+ * @vport_rl_type: Vport RL type.
+ *
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type);
+
+/**
+ * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @is_release_cmd: true for release, false for stop.
+ * @is_tx_pq: true for Tx PQs, false for Other PQs.
+ * @start_pq: First PQ ID to stop.
+ * @num_pqs: Number of PQs to stop, starting from start_pq.
+ *
+ * Return: Bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
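+
+/* Illustrative stop-then-release sequence sketch (the PQ range is a
+ * placeholder; a false return means the wait for QM command done timed out):
+ *
+ *	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ *				  start_pq, num_pqs))
+ *		return -EBUSY;
+ *	...
+ *	qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, start_pq, num_pqs);
+ */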
+
+/**
+ * qed_set_vxlan_dest_port(): Initializes the VXLAN tunnel destination UDP port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: VXLAN destination UDP port.
+ *
+ * Return: Void.
+ */
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @vxlan_enable: vxlan enable flag.
+ *
+ * Return: Void.
+ */
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_gre_enable: Eth GRE enable flag.
+ * @ip_gre_enable: IP GRE enable flag.
+ *
+ * Return: Void.
+ */
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * qed_set_geneve_dest_port(): Initializes the GENEVE tunnel destination UDP port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: GENEVE destination UDP port.
+ *
+ * Return: Void.
+ */
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_geneve_enable: Eth GENEVE enable flag.
+ * @ip_geneve_enable: IP GENEVE enable flag.
+ *
+ * Return: Void.
+ */
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable);
+
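+/**
+ * qed_set_vxlan_no_l2_enable(): Enable or disable VXLAN-no-L2 mode in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @enable: VXLAN no-L2 enable flag.
+ *
+ * Return: Void.
+ */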
+void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable);
+
+/**
+ * qed_gft_disable(): Disable GFT.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to disable GFT.
+ *
+ * Return: Void.
+ */
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
+
+/**
+ * qed_gft_config(): Enable and configure HW for GFT.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to enable GFT.
+ * @tcp: Set profile for TCP packets.
+ * @udp: Set profile for UDP packets.
+ * @ipv4: Set profile for IPv4 packets.
+ * @ipv6: Set profile for IPv6 packets.
+ * @profile_type: Which packet fields define a flow (use enum gft_profile_type).
+ *
+ * Return: Void.
+ */
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type);
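+
+/* Illustrative configuration sketch: enable GFT on this PF for TCP over IPv4
+ * flows only. p_hwfn->rel_pf_id and GFT_PROFILE_TYPE_4_TUPLE are assumed
+ * names from elsewhere in the driver:
+ *
+ *	qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ *		       true, false, true, false,
+ *		       GFT_PROFILE_TYPE_4_TUPLE);
+ */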
+
+/**
+ * qed_enable_context_validation(): Enable and configure context
+ * validation.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ *
+ * Return: Void.
+ */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_calc_session_ctx_validation(): Calculate validation byte for
+ * session context.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @cid: Context cid.
+ *
+ * Return: Void.
+ */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid);
+
+/**
+ * qed_calc_task_ctx_validation(): Calculate validation byte for task
+ * context.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @tid: Context tid.
+ *
+ * Return: Void.
+ */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid);
+
+/**
+ * qed_memset_session_ctx(): Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
+ *
+ * Return: Void.
+ */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+/**
+ * qed_memset_task_ctx(): Memset task context to 0 while preserving
+ * validation bytes.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
+ *
+ * Return: Void.
+ */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
+
+#define NUM_STORMS 6
+
+/**
+ * qed_get_protocol_type_str(): Get a string for Protocol type.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ *
+ * Return: String.
+ */
+const char *qed_get_protocol_type_str(u32 protocol_type);
+
+/**
+ * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID.
+ *
+ * @protocol_type: Protocol type (using enum protocol_type).
+ * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id).
+ *
+ * Return: String.
+ */
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id);
+
+/**
+ * qed_set_rdma_error_level(): Sets the RDMA assert level. If the severity
+ * of an error is above this level, the FW will assert.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @assert_level: An array of assert levels for each storm.
+ *
+ * Return: Void.
+ */
+void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS]);
+
+/**
+ * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
+ *
+ * @p_hwfn: HW device data.
+ * @fw_overlay_in_buf: The input FW overlay buffer.
+ * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes;
+ * must be aligned to dwords.
+ *
+ * Return: A pointer to the allocated overlays memory,
+ * or NULL in case of failures.
+ */
+struct phys_mem_desc *
+qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 *const fw_overlay_in_buf,
+ u32 buf_size_in_bytes);
+
+/**
+ * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_overlay_mem: the allocated FW overlay memory.
+ *
+ * Return: Void.
+ */
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem);
+
+/**
+ * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
+ *
+ * @p_hwfn: HW device data.
+ * @fw_overlay_mem: The allocated FW overlay memory to free.
+ *
+ * Return: Void.
+ */
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc **fw_overlay_mem);
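+
+/* Typical FW overlay lifecycle sketch (the buffer pointer and size come from
+ * the firmware image; variable names here are placeholders):
+ *
+ *	struct phys_mem_desc *fw_overlays;
+ *
+ *	fw_overlays = qed_fw_overlay_mem_alloc(p_hwfn, overlay_buf, buf_size);
+ *	if (!fw_overlays)
+ *		return -ENOMEM;
+ *	qed_fw_overlay_init_ram(p_hwfn, p_ptt, fw_overlays);
+ *	...
+ *	qed_fw_overlay_mem_free(p_hwfn, &fw_overlays);
+ */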
+
+#define PCICFG_OFFSET 0x2000
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+
+/* The first VF_NUM for the PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on the total number of VFs
+ * programmed for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
+ */
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
+
+/* Runtime array offsets */
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 1498
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929
+#define SRC_REG_FIRSTFREE_RT_OFFSET 5930
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 5932
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 5934
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967
+#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968
+#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 27969
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102
+#define QM_REG_PTRTBLOTHER_RT_SIZE 256
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29358
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29425
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29426
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29427
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29428
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29429
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29430
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29431
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29432
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29433
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29434
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29435
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29436
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29437
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29438
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29439
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29440
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29441
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29442
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29443
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29444
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29445
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29446
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29447
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29448
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29449
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29450
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29451
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29452
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29453
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29454
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29455
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29456
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29457
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29458
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29459
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29460
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29461
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29462
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29463
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29464
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29465
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29466
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29467
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29468
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29469
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29470
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29471
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29472
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29473
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29474
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29475
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29476
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29477
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29478
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29479
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29480
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29481
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29482
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29483
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29484
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29485
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29486
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29487
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29488
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30029
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30286
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30288
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30320
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30336
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30370
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 30530
+#define QM_REG_WFQVPENABLE_RT_OFFSET 30531
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31044
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32580
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33092
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_PTRTBLTX_RT_OFFSET 33604
+#define QM_REG_PTRTBLTX_RT_SIZE 1024
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34983
+
+#define RUNTIME_ARRAY_SIZE 34984
+
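/* Editor's note (illustrative aside, not part of this patch): the *_RT_OFFSET
 * constants above index a per-function runtime-init array of
 * RUNTIME_ARRAY_SIZE 32-bit entries, and the matching *_RT_SIZE constants
 * give the number of consecutive entries a block occupies. A minimal sketch
 * of filling such an array is shown below; the rt_array/rt_valid pair is a
 * hypothetical layout used only for illustration.
 */
#if 0	/* sketch only */
static inline void rt_store(u32 *rt_array, bool *rt_valid, u32 rt_offset, u32 val)
{
	/* Record the value and mark the entry as needing to be written
	 * to the chip during the init phase.
	 */
	rt_array[rt_offset] = val;
	rt_valid[rt_offset] = true;
}

/* e.g. rt_store(rt_array, rt_valid, NIG_REG_TX_EDPM_CTRL_RT_OFFSET, 1); */
#endif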
+/* Init Callbacks */
+#define DMAE_READY_CB 0
+
+/* The eth storm context for the Tstorm */
+struct tstorm_eth_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The eth storm context for the Pstorm */
+struct pstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The eth storm context for the Xstorm */
+struct xstorm_eth_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+struct xstorm_eth_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 e5_reserved1;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 updated_qm_pq_id;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
+};
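/* Editor's note (illustrative sketch, not part of this patch): each flagsN
 * byte of the aggregative context packs several sub-fields, described by the
 * paired *_MASK/*_SHIFT defines above. Reading or writing a field is a plain
 * shift-and-mask, equivalent to the driver's GET_FIELD/SET_FIELD helpers:
 */
#if 0	/* sketch only */
static inline u8
xstorm_eth_dq_cf_active(const struct xstorm_eth_conn_ag_ctx *ctx)
{
	/* DQ_CF_ACTIVE lives in bit 7 of flags1 (mask 0x1, shift 7). */
	return (ctx->flags1 >> XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT) &
		XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK;
}
#endif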
+
+/* The eth storm context for the Ystorm */
+struct ystorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+struct ystorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 tx_q0_int_coallecing_timeset;
+ u8 byte3;
+ __le16 word0;
+ __le32 terminate_spqe;
+ __le32 reg1;
+ __le16 tx_bd_cons_upd;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct tstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 rx_bd_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rx_bd_prod;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct ustorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 tx_bd_cons;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 tx_int_coallecing_timeset;
+ __le16 tx_drv_bd_cons;
+ __le16 rx_drv_cqe_cons;
+};
+
+/* The eth storm context for the Ustorm */
+struct ustorm_eth_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+/* The eth storm context for the Mstorm */
+struct mstorm_eth_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* eth connection context */
+struct eth_conn_context {
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
+};
+
+/* Ethernet error codes returned by the FW */
+enum eth_error_code {
+ ETH_OK = 0x00,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL,
+ ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2,
+ ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2,
+ ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2,
+ ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VLAN_ADD_FAIL_FULL,
+ ETH_FILTERS_VLAN_ADD_FAIL_DUP,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF,
+ ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_DUP,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL,
+ ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF,
+ ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1,
+ ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC,
+ ETH_FILTERS_VNI_ADD_FAIL_FULL,
+ ETH_FILTERS_VNI_ADD_FAIL_DUP,
+ ETH_FILTERS_GFT_UPDATE_FAIL,
+ ETH_RX_QUEUE_FAIL_LOAD_VF_DATA,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS,
+ ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_ADD_FINLER_FAIL_MAGIC_NUM_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR,
+ ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR,
+ MAX_ETH_ERROR_CODE
+};
+
+/* Opcodes for the event ring */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RX_ADD_OPENFLOW_FILTER,
+ ETH_EVENT_RX_DELETE_OPENFLOW_FILTER,
+ ETH_EVENT_RX_CREATE_OPENFLOW_ACTION,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RX_CREATE_GFT_ACTION,
+ ETH_EVENT_RX_GFT_UPDATE_FILTER,
+ ETH_EVENT_TX_QUEUE_UPDATE,
+ ETH_EVENT_RGFS_ADD_FILTER,
+ ETH_EVENT_RGFS_DEL_FILTER,
+ ETH_EVENT_TGFS_ADD_FILTER,
+ ETH_EVENT_TGFS_DEL_FILTER,
+ ETH_EVENT_GFS_COUNTERS_REPORT_REQUEST,
+ MAX_ETH_EVENT_OPCODE
+};
+
+/* Ethernet filter action: add/remove/remove-all */
+enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
+ ETH_FILTER_ACTION_REMOVE,
+ ETH_FILTER_ACTION_ADD,
+ ETH_FILTER_ACTION_REMOVE_ALL,
+ MAX_ETH_FILTER_ACTION
+};
+
+/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
+struct eth_filter_cmd {
+ u8 type;
+ u8 vport_id;
+ u8 action;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+/* $$KEEP_ENDIANNESS$$ */
+struct eth_filter_cmd_header {
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
+};
+
+/* Ethernet filter types: mac/vlan/pair */
+enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
+ ETH_FILTER_TYPE_MAC,
+ ETH_FILTER_TYPE_VLAN,
+ ETH_FILTER_TYPE_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC,
+ ETH_FILTER_TYPE_INNER_VLAN,
+ ETH_FILTER_TYPE_INNER_PAIR,
+ ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_MAC_VNI_PAIR,
+ ETH_FILTER_TYPE_VNI,
+ MAX_ETH_FILTER_TYPE
+};
+
+/* Inner-to-inner VLAN priority translation configuration */
+struct eth_in_to_in_pri_map_cfg {
+ u8 inner_vlan_pri_remap_en;
+ u8 reserved[7];
+ u8 non_rdma_in_to_in_pri_map[8];
+ u8 rdma_in_to_in_pri_map[8];
+};
+
+/* Eth IPv4 Fragment Type */
+enum eth_ipv4_frag_type {
+ ETH_IPV4_NOT_FRAG,
+ ETH_IPV4_FIRST_FRAG,
+ ETH_IPV4_NON_FIRST_FRAG,
+ MAX_ETH_IPV4_FRAG_TYPE
+};
+
+/* Eth IP type (IPv4/IPv6) */
+enum eth_ip_type {
+ ETH_IPV4,
+ ETH_IPV6,
+ MAX_ETH_IP_TYPE
+};
+
+/* Ethernet Ramrod Command IDs */
+enum eth_ramrod_cmd_id {
+ ETH_RAMROD_UNUSED,
+ ETH_RAMROD_VPORT_START,
+ ETH_RAMROD_VPORT_UPDATE,
+ ETH_RAMROD_VPORT_STOP,
+ ETH_RAMROD_RX_QUEUE_START,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ ETH_RAMROD_TX_QUEUE_START,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ ETH_RAMROD_FILTERS_UPDATE,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
+ ETH_RAMROD_TX_QUEUE_UPDATE,
+ ETH_RAMROD_RGFS_FILTER_ADD,
+ ETH_RAMROD_RGFS_FILTER_DEL,
+ ETH_RAMROD_TGFS_FILTER_ADD,
+ ETH_RAMROD_TGFS_FILTER_DEL,
+ ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST,
+ MAX_ETH_RAMROD_CMD_ID
+};
+
+/* Return code from eth slow-path (SP) ramrods */
+struct eth_return_code {
+ u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x1
+#define ETH_RETURN_CODE_RESERVED_SHIFT 6
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
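/* Editor's note (illustrative sketch, not part of this patch): the return
 * code byte packs the error code in bits 0-5 (presumably an eth_error_code
 * value) and an Rx/Tx indication in bit 7, per the masks above. Decoding is
 * just:
 */
#if 0	/* sketch only */
static inline enum eth_error_code eth_return_err_code(struct eth_return_code rc)
{
	return (rc.value >> ETH_RETURN_CODE_ERR_CODE_SHIFT) &
		ETH_RETURN_CODE_ERR_CODE_MASK;
}

static inline bool eth_return_is_tx(struct eth_return_code rc)
{
	/* Interpreting the RX_TX bit as 1 == Tx is an assumption made for
	 * this sketch; the defines above only name the bit.
	 */
	return (rc.value >> ETH_RETURN_CODE_RX_TX_SHIFT) &
		ETH_RETURN_CODE_RX_TX_MASK;
}
#endif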
+
+/* Tx destination mode configuration */
+enum eth_tx_dst_mode_config_enum {
+ ETH_TX_DST_MODE_CONFIG_DISABLE,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD,
+ ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT,
+ MAX_ETH_TX_DST_MODE_CONFIG_ENUM
+};
+
+/* What to do in case an error occurs */
+enum eth_tx_err {
+ ETH_TX_ERR_DROP,
+ ETH_TX_ERR_ASSERT_MALICIOUS,
+ MAX_ETH_TX_ERR
+};
+
+/* Tx behavior to apply for each error type */
+struct eth_tx_err_vals {
+ __le16 values;
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT 7
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0xFF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 8
+};
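/* Editor's note (illustrative sketch, not part of this patch): each field in
 * the values bitmap selects the behavior for one Tx error type; the sketch
 * below assumes each bit carries an eth_tx_err value (drop vs. assert), which
 * the defines above name but do not spell out.
 */
#if 0	/* sketch only */
static inline void eth_tx_err_set_mtu_violation(struct eth_tx_err_vals *vals,
						enum eth_tx_err behavior)
{
	u16 v = le16_to_cpu(vals->values);

	v &= ~(ETH_TX_ERR_VALS_MTU_VIOLATION_MASK <<
	       ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT);
	v |= ((u16)behavior & ETH_TX_ERR_VALS_MTU_VIOLATION_MASK) <<
	     ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT;
	vals->values = cpu_to_le16(v);
}
#endif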
+
+/* vport rss configuration data */
+struct eth_vport_rss_config {
+ __le16 capabilities;
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ u8 ind_table_mask_valid;
+ u8 reserved2[3];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS];
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3;
+};
+
+/* eth vport RSS mode */
+enum eth_vport_rss_mode {
+ ETH_VPORT_RSS_MODE_DISABLED,
+ ETH_VPORT_RSS_MODE_REGULAR,
+ MAX_ETH_VPORT_RSS_MODE
+};
+
+/* Command for setting Rx classification flags for a vport $$KEEP_ENDIANNESS$$ */
+struct eth_vport_rx_mode {
+ __le16 state;
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1
+#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7
+};
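/* Editor's note (illustrative sketch, not part of this patch): the Rx-mode
 * state word is a bitmap of accept/drop classification flags. A
 * promiscuous-style configuration, for example, would set the unicast and
 * multicast "accept all" bits:
 */
#if 0	/* sketch only */
static inline void eth_vport_rx_mode_set_promisc(struct eth_vport_rx_mode *mode)
{
	u16 state = le16_to_cpu(mode->state);

	state |= ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK <<
		 ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT;
	state |= ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK <<
		 ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT;
	mode->state = cpu_to_le16(state);
}
#endif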
+
+/* Command for setting tpa parameters */
+struct eth_vport_tpa_param {
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+
+ u8 tpa_max_aggs_num;
+
+ __le16 tpa_max_size;
+ __le16 tpa_min_size_to_start;
+
+ __le16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
+};
+
+/* Command for setting Tx classification flags for a vport $$KEEP_ENDIANNESS$$ */
+struct eth_vport_tx_mode {
+ __le16 state;
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+};
+
+/* GFT filter update action type */
+enum gft_filter_update_action {
+ GFT_ADD_FILTER,
+ GFT_DELETE_FILTER,
+ MAX_GFT_FILTER_UPDATE_ACTION
+};
+
+/* Ramrod data for rx create gft action */
+struct rx_create_gft_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for rx create openflow action */
+struct rx_create_openflow_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for rx add openflow filter */
+struct rx_openflow_filter_ramrod_data {
+ __le16 action_icid;
+ u8 priority;
+ u8 reserved0;
+ __le32 tenant_id;
+ __le16 dst_mac_hi;
+ __le16 dst_mac_mid;
+ __le16 dst_mac_lo;
+ __le16 src_mac_hi;
+ __le16 src_mac_mid;
+ __le16 src_mac_lo;
+ __le16 vlan_id;
+ __le16 l2_eth_type;
+ u8 ipv4_dscp;
+ u8 ipv4_frag_type;
+ u8 ipv4_over_ip;
+ u8 tenant_id_exists;
+ __le32 ipv4_dst_addr;
+ __le32 ipv4_src_addr;
+ __le16 l4_dst_port;
+ __le16 l4_src_port;
+};
+
+/* Ramrod data for rx queue start ramrod */
+struct rx_queue_start_ramrod_data {
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+
+ __le16 pxp_st_index;
+ u8 pmd_mode;
+
+ u8 notify_en;
+ u8 toggle_val;
+
+ u8 vf_rx_prod_index;
+ u8 vf_rx_prod_use_zone_a;
+ u8 reserved[5];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
+};
+
+/* Ramrod data for rx queue stop ramrod */
+struct rx_queue_stop_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
+};
+
+/* Ramrod data for rx queue update ramrod */
+struct rx_queue_update_ramrod_data {
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 set_default_rss_queue;
+ u8 reserved[3];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
+ struct regpair reserved6;
+};
+
+/* Ramrod data for rx Add UDP Filter */
+struct rx_udp_filter_ramrod_data {
+ __le16 action_icid;
+ __le16 vlan_id;
+ u8 ip_type;
+ u8 tenant_id_exists;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4];
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port;
+ __le16 udp_src_port;
+ __le32 tenant_id;
+};
+
+/* Add or delete a GFT filter. The filter is specified by a packet header of
+ * the packet type that should match the given FW flow.
+ */
+struct rx_update_gft_filter_ramrod_data {
+ struct regpair pkt_hdr_addr;
+ __le16 pkt_hdr_length;
+ __le16 action_icid;
+ __le16 rx_qid;
+ __le16 flow_id;
+ __le16 vport_id;
+ u8 action_icid_valid;
+ u8 rx_qid_valid;
+ u8 flow_id_valid;
+ u8 filter_action;
+ u8 assert_on_error;
+ u8 inner_vlan_removal_en;
+};
+
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 reserved0;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x7
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 5
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ u8 comp_agg_size;
+ u8 reserved3;
+ __le16 queue_zone_id;
+ __le16 reserved2;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+ __le16 same_as_last_id;
+ __le16 reserved[3];
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
+struct tx_queue_stop_ramrod_data {
+ __le16 reserved[4];
+};
+
+/* Ramrod data for tx queue update ramrod */
+struct tx_queue_update_ramrod_data {
+ __le16 update_qm_pq_id_flg;
+ __le16 qm_pq_id;
+ __le32 reserved0;
+ struct regpair reserved1[5];
+};
+
+/* Inner to Inner VLAN priority map update mode */
+enum update_in_to_in_pri_map_mode_enum {
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED,
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL,
+ ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL,
+ MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM
+};
+
+/* Ramrod data for vport filter update ramrod */
+struct vport_filter_update_ramrod_data {
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+};
+
+/* Ramrod data for vport start ramrod */
+struct vport_start_ramrod_data {
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+ u8 default_vlan_en;
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+ u8 zero_placement_offset;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved0;
+ u8 reserved1;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
+ u8 wipe_inner_vlan_pri_en;
+ u8 reserved2[2];
+ struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg;
+};
+
+/* Ramrod data for vport stop ramrod */
+struct vport_stop_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for vport update ramrod */
+struct vport_update_ramrod_data_cmn {
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_tx_switching_en_flg;
+
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+
+ u8 default_vlan_en;
+
+ u8 update_default_vlan_flg;
+
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg;
+
+ __le16 mtu;
+ u8 update_ctl_frame_checks_en_flg;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 update_in_to_in_pri_map_mode;
+ u8 in_to_in_pri_map[8];
+ u8 update_tx_dst_port_mode_flg;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
+ u8 reserved[1];
+};
+
+struct vport_update_ramrod_mcast {
+ __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+};
+
+/* Ramrod data for vport update ramrod */
+struct vport_update_ramrod_data {
+ struct vport_update_ramrod_data_cmn common;
+
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ __le32 reserved[3];
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
+};
+
+struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7
+ u8 flags1;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6
+ u8 flags4;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7
+ u8 flags11;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3
+#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 e5_reserved1;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 updated_qm_pq_id;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+};
+
+struct mstorm_eth_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct xstorm_eth_hw_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 e5_reserved1;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 updated_qm_pq_id;
+ __le16 conn_dpi;
+};
+
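+/* Minimal usage sketch: every flagsN byte above is a packed bitfield whose
+ * sub-fields are described by _MASK/_SHIFT pairs.  Assuming the enclosing
+ * struct is xstorm_eth_hw_conn_ag_ctx (as the macro prefix suggests), the
+ * hypothetical helpers below show the open-coded get/set pattern for the
+ * two-bit TPH_ENABLE field of flags14.
+ */
+static inline u8
+xstorm_eth_hw_get_tph_enable(const struct xstorm_eth_hw_conn_ag_ctx *ctx)
+{
+	return (ctx->flags14 >> XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT) &
+	       XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK;
+}
+
+static inline void
+xstorm_eth_hw_set_tph_enable(struct xstorm_eth_hw_conn_ag_ctx *ctx, u8 val)
+{
+	ctx->flags14 &= ~(XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK <<
+			  XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT);
+	ctx->flags14 |= (val & XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK) <<
+			XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT;
+}
+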
+/* GFT CAM line struct, with its fields broken out */
+struct gft_cam_line_mapped {
+ __le32 camline;
+#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1
+#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF
+#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25
+#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7
+#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
+};
+
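+/* Minimal usage sketch: a mapped CAM line is built by OR-ing each value into
+ * place with its _SHIFT, while the corresponding *_MASK_* bits select which
+ * fields actually participate in the match.  The hypothetical helper below
+ * marks the line valid and matches only on PF id; the result is stored
+ * little-endian with the standard kernel cpu_to_le32() helper.
+ */
+static inline void gft_cam_line_set_pf_match(struct gft_cam_line_mapped *line,
+					     u8 pf_id)
+{
+	u32 cam = 0;
+
+	cam |= 0x1 << GFT_CAM_LINE_MAPPED_VALID_SHIFT;
+	cam |= (pf_id & GFT_CAM_LINE_MAPPED_PF_ID_MASK) <<
+	       GFT_CAM_LINE_MAPPED_PF_ID_SHIFT;
+	cam |= GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK <<
+	       GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT;
+
+	line->camline = cpu_to_le32(cam);
+}
+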
+/* Used in gft_profile_key: Indication for ip version */
+enum gft_profile_ip_version {
+ GFT_PROFILE_IPV4 = 0,
+ GFT_PROFILE_IPV6 = 1,
+ MAX_GFT_PROFILE_IP_VERSION
+};
+
+/* Profile key struct for GFT logic in Prs (parser) */
+struct gft_profile_key {
+ __le16 profile_key;
+#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1
+#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF
+#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6
+#define GFT_PROFILE_KEY_PF_ID_MASK 0xF
+#define GFT_PROFILE_KEY_PF_ID_SHIFT 10
+#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3
+#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14
+};
+
+/* Used in gft_profile_key: Indication for tunnel type */
+enum gft_profile_tunnel_type {
+ GFT_PROFILE_NO_TUNNEL = 0,
+ GFT_PROFILE_VXLAN_TUNNEL = 1,
+ GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2,
+ GFT_PROFILE_GRE_IP_TUNNEL = 3,
+ GFT_PROFILE_GENEVE_MAC_TUNNEL = 4,
+ GFT_PROFILE_GENEVE_IP_TUNNEL = 5,
+ MAX_GFT_PROFILE_TUNNEL_TYPE
+};
+
+/* Used in gft_profile_key: Indication for protocol type */
+enum gft_profile_upper_protocol_type {
+ GFT_PROFILE_ROCE_PROTOCOL = 0,
+ GFT_PROFILE_RROCE_PROTOCOL = 1,
+ GFT_PROFILE_FCOE_PROTOCOL = 2,
+ GFT_PROFILE_ICMP_PROTOCOL = 3,
+ GFT_PROFILE_ARP_PROTOCOL = 4,
+ GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5,
+ GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6,
+ GFT_PROFILE_TCP_PROTOCOL = 7,
+ GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8,
+ GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9,
+ GFT_PROFILE_UDP_PROTOCOL = 10,
+ GFT_PROFILE_USER_IP_1_INNER = 11,
+ GFT_PROFILE_USER_IP_2_OUTER = 12,
+ GFT_PROFILE_USER_ETH_1_INNER = 13,
+ GFT_PROFILE_USER_ETH_2_OUTER = 14,
+ GFT_PROFILE_RAW = 15,
+ MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE
+};
+
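+/* Minimal usage sketch: a parser profile key packs the enums above into the
+ * 16-bit profile_key field.  The hypothetical helper below assembles a key
+ * for a given IP version, tunnel type, upper protocol type and PF.
+ */
+static inline __le16 gft_make_profile_key(enum gft_profile_ip_version ip_ver,
+					  enum gft_profile_tunnel_type tunnel,
+					  enum gft_profile_upper_protocol_type proto,
+					  u8 pf_id)
+{
+	u16 key = 0;
+
+	key |= (ip_ver & GFT_PROFILE_KEY_IP_VERSION_MASK) <<
+	       GFT_PROFILE_KEY_IP_VERSION_SHIFT;
+	key |= (tunnel & GFT_PROFILE_KEY_TUNNEL_TYPE_MASK) <<
+	       GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT;
+	key |= (proto & GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK) <<
+	       GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT;
+	key |= (pf_id & GFT_PROFILE_KEY_PF_ID_MASK) << GFT_PROFILE_KEY_PF_ID_SHIFT;
+
+	return cpu_to_le16(key);
+}
+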
+/* GFT RAM line struct */
+struct gft_ram_line {
+ __le32 lo;
+#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3
+#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3
+#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7
+#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9
+#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13
+#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1
+#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17
+#define GFT_RAM_LINE_TTL_MASK 0x1
+#define GFT_RAM_LINE_TTL_SHIFT 18
+#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1
+#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19
+#define GFT_RAM_LINE_RESERVED0_MASK 0x1
+#define GFT_RAM_LINE_RESERVED0_SHIFT 20
+#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21
+#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22
+#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23
+#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24
+#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25
+#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27
+#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28
+#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29
+#define GFT_RAM_LINE_DST_PORT_MASK 0x1
+#define GFT_RAM_LINE_DST_PORT_SHIFT 30
+#define GFT_RAM_LINE_SRC_PORT_MASK 0x1
+#define GFT_RAM_LINE_SRC_PORT_SHIFT 31
+ __le32 hi;
+#define GFT_RAM_LINE_DSCP_MASK 0x1
+#define GFT_RAM_LINE_DSCP_SHIFT 0
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1
+#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1
+#define GFT_RAM_LINE_DST_IP_MASK 0x1
+#define GFT_RAM_LINE_DST_IP_SHIFT 2
+#define GFT_RAM_LINE_SRC_IP_MASK 0x1
+#define GFT_RAM_LINE_SRC_IP_SHIFT 3
+#define GFT_RAM_LINE_PRIORITY_MASK 0x1
+#define GFT_RAM_LINE_PRIORITY_SHIFT 4
+#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1
+#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5
+#define GFT_RAM_LINE_VLAN_MASK 0x1
+#define GFT_RAM_LINE_VLAN_SHIFT 6
+#define GFT_RAM_LINE_DST_MAC_MASK 0x1
+#define GFT_RAM_LINE_DST_MAC_SHIFT 7
+#define GFT_RAM_LINE_SRC_MAC_MASK 0x1
+#define GFT_RAM_LINE_SRC_MAC_SHIFT 8
+#define GFT_RAM_LINE_TENANT_ID_MASK 0x1
+#define GFT_RAM_LINE_TENANT_ID_SHIFT 9
+#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF
+#define GFT_RAM_LINE_RESERVED1_SHIFT 10
+};
+
+/* Used in the first 2 bits of gft_ram_line: indication of which VLAN to select */
+enum gft_vlan_select {
+ INNER_PROVIDER_VLAN = 0,
+ INNER_VLAN = 1,
+ OUTER_PROVIDER_VLAN = 2,
+ OUTER_VLAN = 3,
+ MAX_GFT_VLAN_SELECT
+};
+
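+/* Minimal usage sketch: a RAM line is a per-profile bitmap selecting which
+ * packet fields feed the GFT flow key; VLAN_SELECT takes one of the
+ * gft_vlan_select values above.  The hypothetical helper below enables a
+ * plain (non-tunneled) 5-tuple match.
+ */
+static inline void gft_ram_line_set_5tuple(struct gft_ram_line *ram_line)
+{
+	u32 lo = 0, hi = 0;
+
+	lo |= INNER_VLAN << GFT_RAM_LINE_VLAN_SELECT_SHIFT;
+	lo |= 0x1U << GFT_RAM_LINE_DST_PORT_SHIFT;
+	lo |= 0x1U << GFT_RAM_LINE_SRC_PORT_SHIFT;
+	hi |= 0x1U << GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT;
+	hi |= 0x1U << GFT_RAM_LINE_DST_IP_SHIFT;
+	hi |= 0x1U << GFT_RAM_LINE_SRC_IP_SHIFT;
+
+	ram_line->lo = cpu_to_le32(lo);
+	ram_line->hi = cpu_to_le32(hi);
+}
+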
+/* The rdma task context of Ystorm */
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+ u8 flags1;
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt_or_qp_id;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+ u8 flags1;
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt_or_qp_id;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+/* The rdma task context of Mstorm */
+struct mstorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+/* The rdma task context of Ustorm */
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[6];
+};
+
+struct ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 state;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 dif_rxmit_cons;
+ __le32 dif_rxmit_prod;
+ __le32 sge_index;
+ __le32 sq_cons;
+ u8 byte2;
+ u8 byte3;
+ __le16 dif_write_cons;
+ __le16 dif_write_prod;
+ __le16 word3;
+ __le32 dif_error_buffer_address_lo;
+ __le32 dif_error_buffer_address_hi;
+};
+
+/* RDMA task context */
+struct rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+#define TOE_MAX_RAMROD_PER_PF 8
+#define TOE_TX_PAGE_SIZE_BYTES 4096
+#define TOE_GRQ_PAGE_SIZE_BYTES 4096
+#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096
+
+#define TOE_RX_MAX_RSS_CHAINS 64
+#define TOE_TX_MAX_TSS_CHAINS 64
+#define TOE_RSS_INDIRECTION_TABLE_SIZE 128
+
+/* The toe storm context of Mstorm */
+struct mstorm_toe_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The toe storm context of Pstorm */
+struct pstorm_toe_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+/* The toe storm context of Ystorm */
+struct ystorm_toe_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The toe storm context of Xstorm */
+struct xstorm_toe_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct ystorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7
+ u8 completion_opcode;
+ u8 byte3;
+ __le16 word0;
+ __le32 rel_seq;
+ __le32 rel_seq_threshold;
+ __le16 app_prod;
+ __le16 app_cons;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct xstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 word2;
+ __le16 word3;
+ __le16 bd_prod;
+ __le16 word5;
+ __le16 word6;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 local_adv_wnd_seq;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_toe_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The toe storm context of Tstorm */
+struct tstorm_toe_conn_st_ctx {
+ __le32 reserved[16];
+};
+
+/* The toe storm context of Ustorm */
+struct ustorm_toe_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+/* toe connection context */
+struct toe_conn_context {
+ struct ystorm_toe_conn_st_ctx ystorm_st_context;
+ struct pstorm_toe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_toe_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct ystorm_toe_conn_ag_ctx ystorm_ag_context;
+ struct xstorm_toe_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_toe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_toe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_toe_conn_st_ctx tstorm_st_context;
+ struct mstorm_toe_conn_st_ctx mstorm_st_context;
+ struct ustorm_toe_conn_st_ctx ustorm_st_context;
+};
+
+/* toe init ramrod header */
+struct toe_init_ramrod_header {
+ u8 first_rss;
+ u8 num_rss;
+ u8 reserved[6];
+};
+
+/* toe pf init parameters */
+struct toe_pf_init_params {
+ __le32 push_timeout;
+ __le16 grq_buffer_size;
+ __le16 grq_sb_id;
+ u8 grq_sb_index;
+ u8 max_seg_retransmit;
+ u8 doubt_reachability;
+ u8 ll2_rx_queue_id;
+ __le16 grq_fetch_threshold;
+ u8 reserved1[2];
+ struct regpair grq_page_addr;
+};
+
+/* toe tss parameters */
+struct toe_tss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe rss parameters */
+struct toe_rss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe init ramrod data */
+struct toe_init_ramrod_data {
+ struct toe_init_ramrod_header hdr;
+ struct tcp_init_params tcp_params;
+ struct toe_pf_init_params pf_params;
+ struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS];
+ struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS];
+};
+
+/* toe offload parameters */
+struct toe_offload_params {
+ struct regpair tx_bd_page_addr;
+ struct regpair tx_app_page_addr;
+ __le32 more_to_send_seq;
+ __le16 rcv_indication_size;
+ u8 rss_tss_id;
+ u8 ignore_grq_push;
+ struct regpair rx_db_data_ptr;
+};
+
+/* TOE offload ramrod data - DMAed by firmware */
+struct toe_offload_ramrod_data {
+ struct tcp_offload_params tcp_ofld_params;
+ struct toe_offload_params toe_ofld_params;
+};
+
+/* TOE ramrod command IDs */
+enum toe_ramrod_cmd_id {
+ TOE_RAMROD_UNUSED,
+ TOE_RAMROD_FUNC_INIT,
+ TOE_RAMROD_INITATE_OFFLOAD,
+ TOE_RAMROD_FUNC_CLOSE,
+ TOE_RAMROD_SEARCHER_DELETE,
+ TOE_RAMROD_TERMINATE,
+ TOE_RAMROD_QUERY,
+ TOE_RAMROD_UPDATE,
+ TOE_RAMROD_EMPTY,
+ TOE_RAMROD_RESET_SEND,
+ TOE_RAMROD_INVALIDATE,
+ MAX_TOE_RAMROD_CMD_ID
+};
+
+/* Toe RQ buffer descriptor */
+struct toe_rx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_RX_BD_START_MASK 0x1
+#define TOE_RX_BD_START_SHIFT 0
+#define TOE_RX_BD_END_MASK 0x1
+#define TOE_RX_BD_END_SHIFT 1
+#define TOE_RX_BD_NO_PUSH_MASK 0x1
+#define TOE_RX_BD_NO_PUSH_SHIFT 2
+#define TOE_RX_BD_SPLIT_MASK 0x1
+#define TOE_RX_BD_SPLIT_SHIFT 3
+#define TOE_RX_BD_RESERVED0_MASK 0xFFF
+#define TOE_RX_BD_RESERVED0_SHIFT 4
+ __le32 reserved1;
+};
+
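+/* Minimal usage sketch: an RQ buffer descriptor carries a DMA address, a
+ * length and packed flag bits.  Assuming the usual struct regpair layout of
+ * lo/hi __le32 halves, the hypothetical helper below posts a single buffer
+ * that both starts and ends an application buffer.
+ */
+static inline void toe_rx_bd_fill(struct toe_rx_bd *bd, dma_addr_t buf_addr,
+				  u16 buf_len)
+{
+	bd->addr.lo = cpu_to_le32(lower_32_bits(buf_addr));
+	bd->addr.hi = cpu_to_le32(upper_32_bits(buf_addr));
+	bd->size = cpu_to_le16(buf_len);
+	bd->flags = cpu_to_le16((0x1 << TOE_RX_BD_START_SHIFT) |
+				(0x1 << TOE_RX_BD_END_SHIFT));
+	bd->reserved1 = 0;
+}
+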
+/* TOE RX completion queue opcodes (opcode 0 is illegal) */
+enum toe_rx_cmp_opcode {
+ TOE_RX_CMP_OPCODE_GA = 1,
+ TOE_RX_CMP_OPCODE_GR = 2,
+ TOE_RX_CMP_OPCODE_GNI = 3,
+ TOE_RX_CMP_OPCODE_GAIR = 4,
+ TOE_RX_CMP_OPCODE_GAIL = 5,
+ TOE_RX_CMP_OPCODE_GRI = 6,
+ TOE_RX_CMP_OPCODE_GJ = 7,
+ TOE_RX_CMP_OPCODE_DGI = 8,
+ TOE_RX_CMP_OPCODE_CMP = 9,
+ TOE_RX_CMP_OPCODE_REL = 10,
+ TOE_RX_CMP_OPCODE_SKP = 11,
+ TOE_RX_CMP_OPCODE_URG = 12,
+ TOE_RX_CMP_OPCODE_RT_TO = 13,
+ TOE_RX_CMP_OPCODE_KA_TO = 14,
+ TOE_RX_CMP_OPCODE_MAX_RT = 15,
+ TOE_RX_CMP_OPCODE_DBT_RE = 16,
+ TOE_RX_CMP_OPCODE_SYN = 17,
+ TOE_RX_CMP_OPCODE_OPT_ERR = 18,
+ TOE_RX_CMP_OPCODE_FW2_TO = 19,
+ TOE_RX_CMP_OPCODE_2WY_CLS = 20,
+ TOE_RX_CMP_OPCODE_RST_RCV = 21,
+ TOE_RX_CMP_OPCODE_FIN_RCV = 22,
+ TOE_RX_CMP_OPCODE_FIN_UPL = 23,
+ TOE_RX_CMP_OPCODE_INIT = 32,
+ TOE_RX_CMP_OPCODE_RSS_UPDATE = 33,
+ TOE_RX_CMP_OPCODE_CLOSE = 34,
+ TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80,
+ TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81,
+ TOE_RX_CMP_OPCODE_TERMINATE = 82,
+ TOE_RX_CMP_OPCODE_QUERY = 83,
+ TOE_RX_CMP_OPCODE_RESET_SEND = 84,
+ TOE_RX_CMP_OPCODE_INVALIDATE = 85,
+ TOE_RX_CMP_OPCODE_EMPTY = 86,
+ TOE_RX_CMP_OPCODE_UPDATE = 87,
+ MAX_TOE_RX_CMP_OPCODE
+};
+
+/* TOE rx ooo completion data */
+struct toe_rx_cqe_ooo_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ u8 isle_num;
+ u8 reserved0;
+};
+
+/* TOE rx in order completion data */
+struct toe_rx_cqe_in_order_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ __le16 reserved1;
+};
+
+/* Union for TOE rx completion data */
+union toe_rx_cqe_data_union {
+ struct toe_rx_cqe_ooo_params ooo_params;
+ struct toe_rx_cqe_in_order_params in_order_params;
+ struct regpair raw_data;
+};
+
+/* TOE rx completion element */
+struct toe_rx_cqe {
+ __le16 icid;
+ u8 completion_opcode;
+ u8 reserved0;
+ __le32 reserved1;
+ union toe_rx_cqe_data_union data;
+};
+
+/* toe RX doorbell data */
+struct toe_rx_db_data {
+ __le32 local_adv_wnd_seq;
+ __le32 reserved[3];
+};
+
+/* Toe GRQ buffer descriptor */
+struct toe_rx_grq_bd {
+ struct regpair addr;
+ __le16 buff_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/* Toe transmission application buffer descriptor */
+struct toe_tx_app_buff_desc {
+ __le32 next_buffer_start_seq;
+ __le32 reserved;
+};
+
+/* Toe transmission application buffer descriptor page pointer */
+struct toe_tx_app_buff_page_pointer {
+ struct regpair next_page_addr;
+};
+
+/* Toe transmission buffer descriptor */
+struct toe_tx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_TX_BD_PUSH_MASK 0x1
+#define TOE_TX_BD_PUSH_SHIFT 0
+#define TOE_TX_BD_NOTIFY_MASK 0x1
+#define TOE_TX_BD_NOTIFY_SHIFT 1
+#define TOE_TX_BD_LARGE_IO_MASK 0x1
+#define TOE_TX_BD_LARGE_IO_SHIFT 2
+#define TOE_TX_BD_BD_CONS_MASK 0x1FFF
+#define TOE_TX_BD_BD_CONS_SHIFT 3
+ __le32 next_bd_start_seq;
+};
+
+/* TOE TX completion opcodes */
+enum toe_tx_cmp_opcode {
+ TOE_TX_CMP_OPCODE_DATA,
+ TOE_TX_CMP_OPCODE_TERMINATE,
+ TOE_TX_CMP_OPCODE_EMPTY,
+ TOE_TX_CMP_OPCODE_RESET_SEND,
+ TOE_TX_CMP_OPCODE_INVALIDATE,
+ TOE_TX_CMP_OPCODE_RST_RCV,
+ MAX_TOE_TX_CMP_OPCODE
+};
+
+/* Toe transmission completion element */
+struct toe_tx_cqe {
+ __le16 icid;
+ u8 opcode;
+ u8 reserved;
+ __le32 size;
+};
+
+/* Toe transmission page pointer bd */
+struct toe_tx_page_pointer_bd {
+ struct regpair next_page_addr;
+ struct regpair prev_page_addr;
+};
+
+/* Toe transmission completion element page pointer */
+struct toe_tx_page_pointer_cqe {
+ struct regpair next_page_addr;
+};
+
+/* toe update parameters */
+struct toe_update_params {
+ __le16 flags;
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0
+#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF
+#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1
+ __le16 rcv_indication_size;
+ __le16 reserved1[2];
+};
+
+/* TOE update ramrod data - DMAed by firmware */
+struct toe_update_ramrod_data {
+ struct tcp_update_params tcp_upd_params;
+ struct toe_update_params toe_upd_params;
+};
+
+struct mstorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* TOE doorbell data */
+struct toe_db_data {
+ u8 params;
+#define TOE_DB_DATA_DEST_MASK 0x3
+#define TOE_DB_DATA_DEST_SHIFT 0
+#define TOE_DB_DATA_AGG_CMD_MASK 0x3
+#define TOE_DB_DATA_AGG_CMD_SHIFT 2
+#define TOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define TOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define TOE_DB_DATA_RESERVED_MASK 0x1
+#define TOE_DB_DATA_RESERVED_SHIFT 5
+#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
+};
+
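+/* Minimal usage sketch: the one-byte params field of the doorbell data packs
+ * destination, aggregation command and value-select codes whose encodings
+ * come from the common doorbell definitions (not repeated here); the values
+ * used below are placeholders only.
+ */
+static inline void toe_db_data_fill(struct toe_db_data *db, u16 tx_bd_prod)
+{
+	db->params = (0x1 << TOE_DB_DATA_DEST_SHIFT) |		/* placeholder dest */
+		     (0x0 << TOE_DB_DATA_AGG_CMD_SHIFT) |	/* placeholder agg cmd */
+		     (0x0 << TOE_DB_DATA_AGG_VAL_SEL_SHIFT);
+	db->agg_flags = 0;
+	db->bd_prod = cpu_to_le16(tx_bd_prod);
+}
+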
+/* rdma function close ramrod data */
+struct rdma_close_func_ramrod_data {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[4];
+};
+
+/* rdma function init CNQ parameters */
+struct rdma_cnq_params {
+ __le16 sb_num;
+ u8 sb_index;
+ u8 num_pbl_pages;
+ __le32 reserved;
+ struct regpair pbl_base_addr;
+ __le16 queue_zone_num;
+ u8 reserved1[6];
+};
+
+/* rdma create cq ramrod data */
+struct rdma_create_cq_ramrod_data {
+ struct regpair cq_handle;
+ struct regpair pbl_addr;
+ __le32 max_cqes;
+ __le16 pbl_num_pages;
+ __le16 dpi;
+ u8 is_two_level_pbl;
+ u8 cnq_id;
+ u8 pbl_log_page_size;
+ u8 toggle_bit;
+ __le16 int_timeout;
+ u8 vf_id;
+ u8 flags;
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 0
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_MASK 0x7F
+#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_SHIFT 1
+};
+
+/* rdma deregister tid ramrod data */
+struct rdma_deregister_tid_ramrod_data {
+ __le32 itid;
+ __le32 reserved;
+};
+
+/* rdma destroy cq output params */
+struct rdma_destroy_cq_output_params {
+ __le16 cnq_num;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/* rdma destroy cq ramrod data */
+struct rdma_destroy_cq_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RDMA slow path EQ cmd IDs */
+enum rdma_event_opcode {
+ RDMA_EVENT_UNUSED,
+ RDMA_EVENT_FUNC_INIT,
+ RDMA_EVENT_FUNC_CLOSE,
+ RDMA_EVENT_REGISTER_MR,
+ RDMA_EVENT_DEREGISTER_MR,
+ RDMA_EVENT_CREATE_CQ,
+ RDMA_EVENT_RESIZE_CQ,
+ RDMA_EVENT_DESTROY_CQ,
+ RDMA_EVENT_CREATE_SRQ,
+ RDMA_EVENT_MODIFY_SRQ,
+ RDMA_EVENT_DESTROY_SRQ,
+ RDMA_EVENT_START_NAMESPACE_TRACKING,
+ RDMA_EVENT_STOP_NAMESPACE_TRACKING,
+ MAX_RDMA_EVENT_OPCODE
+};
+
+/* RDMA FW return code for slow path ramrods */
+enum rdma_fw_return_code {
+ RDMA_RETURN_OK = 0,
+ RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_RESIZE_CQ_ERR,
+ RDMA_RETURN_NIG_DRAIN_REQ,
+ RDMA_RETURN_GENERAL_ERR,
+ MAX_RDMA_FW_RETURN_CODE
+};
+
+/* rdma function init header */
+struct rdma_init_func_hdr {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 cq_ring_mode;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 relaxed_ordering;
+ __le16 first_reg_srq_id;
+ __le32 reg_srq_base_addr;
+ u8 flags;
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2
+#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F
+#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3
+ u8 dpt_byte_threshold_log;
+ u8 dpt_common_queue_id;
+ u8 max_num_ns_log;
+};
+
+/* rdma function init ramrod data */
+struct rdma_init_func_ramrod_data {
+ struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params dptq_params;
+ struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
+};
+
+/* rdma namespace tracking ramrod data */
+struct rdma_namespace_tracking_ramrod_data {
+ u8 name_space;
+ u8 reserved[7];
+};
+
+/* RDMA ramrod command IDs */
+enum rdma_ramrod_cmd_id {
+ RDMA_RAMROD_UNUSED,
+ RDMA_RAMROD_FUNC_INIT,
+ RDMA_RAMROD_FUNC_CLOSE,
+ RDMA_RAMROD_REGISTER_MR,
+ RDMA_RAMROD_DEREGISTER_MR,
+ RDMA_RAMROD_CREATE_CQ,
+ RDMA_RAMROD_RESIZE_CQ,
+ RDMA_RAMROD_DESTROY_CQ,
+ RDMA_RAMROD_CREATE_SRQ,
+ RDMA_RAMROD_MODIFY_SRQ,
+ RDMA_RAMROD_DESTROY_SRQ,
+ RDMA_RAMROD_START_NS_TRACKING,
+ RDMA_RAMROD_STOP_NS_TRACKING,
+ MAX_RDMA_RAMROD_CMD_ID
+};
+
+/* rdma register tid ramrod data */
+struct rdma_register_tid_ramrod_data {
+ __le16 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14
+ u8 flags1;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+ u8 flags2;
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+ u8 key;
+ u8 length_hi;
+ u8 vf_id;
+ u8 vf_valid;
+ __le16 pd;
+ __le16 reserved2;
+ __le32 length_lo;
+ __le32 itid;
+ __le32 reserved3;
+ struct regpair va;
+ struct regpair pbl_base;
+ struct regpair dif_error_addr;
+ __le32 reserved4[4];
+};
+
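+/* Minimal usage sketch: the 16-bit flags word of the register-TID ramrod
+ * packs the log of the page size together with the MR access rights.  The
+ * hypothetical helper below describes a 4KB-page region with local
+ * read/write and remote write enabled.
+ */
+static inline __le16 rdma_register_tid_flags(void)
+{
+	u16 flags = 0;
+
+	flags |= (12 & RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK) <<
+		 RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT;	/* 2^12 = 4KB */
+	flags |= 0x1 << RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT;
+	flags |= 0x1 << RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT;
+	flags |= 0x1 << RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT;
+
+	return cpu_to_le16(flags);
+}
+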
+/* rdma resize cq output params */
+struct rdma_resize_cq_output_params {
+ __le32 old_cq_cons;
+ __le32 old_cq_prod;
+};
+
+/* rdma resize cq ramrod data */
+struct rdma_resize_cq_ramrod_data {
+ u8 flags;
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 2
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 3
+ u8 pbl_log_page_size;
+ __le16 pbl_num_pages;
+ __le32 max_cqes;
+ struct regpair pbl_addr;
+ struct regpair output_params_addr;
+ u8 vf_id;
+ u8 reserved1[7];
+};
+
+/* The rdma SRQ context */
+struct rdma_srq_context {
+ struct regpair temp[8];
+};
+
+/* rdma create srq ramrod data */
+struct rdma_srq_create_ramrod_data {
+ u8 flags;
+#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_MASK 0x1
+#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_SHIFT 0
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 1
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_SHIFT 2
+ u8 reserved2;
+ __le16 xrc_domain;
+ __le32 xrc_srq_cq_cid;
+ struct regpair pbl_base_addr;
+ __le16 pages_in_srq_pbl;
+ __le16 pd_id;
+ struct rdma_srq_id srq_id;
+ __le16 page_size;
+ __le16 reserved3;
+ __le32 reserved4;
+ struct regpair producers_addr;
+};
+
+/* rdma destroy srq ramrod data */
+struct rdma_srq_destroy_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 reserved;
+};
+
+/* rdma modify srq ramrod data */
+struct rdma_srq_modify_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 wqe_limit;
+};
+
+/* RDMA Tid type enumeration (for register_tid ramrod) */
+enum rdma_tid_type {
+ RDMA_TID_REGISTERED_MR,
+ RDMA_TID_FMR,
+ RDMA_TID_MW,
+ MAX_RDMA_TID_TYPE
+};
+
+/* The rdma XRC SRQ context */
+struct rdma_xrc_srq_context {
+ struct regpair temp[9];
+};
+
+struct tstorm_rdma_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 byte2;
+ __le16 word1;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+struct ustorm_rdma_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 nvmf_only;
+ __le16 conn_dpi;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 int_timeout;
+ __le16 word3;
+};
+
+struct xstorm_roce_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+};
+
+struct tstorm_roce_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+/* The roce storm context of Ystorm */
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
+};
+
+/* The roce storm context of Pstorm */
+struct pstorm_roce_conn_st_ctx {
+ struct regpair temp[16];
+};
+
+/* The roce storm context of Xstorm */
+struct xstorm_roce_conn_st_ctx {
+ struct regpair temp[24];
+};
+
+/* The roce storm context of Tstorm */
+struct tstorm_roce_conn_st_ctx {
+ struct regpair temp[30];
+};
+
+/* The roce storm context of Mstorm */
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+/* The roce storm context of Ustorm */
+struct ustorm_roce_conn_st_ctx {
+ struct regpair temp[14];
+};
+
+/* roce connection context */
+struct roce_conn_context {
+ struct ystorm_roce_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_roce_conn_st_ctx pstorm_st_context;
+ struct xstorm_roce_conn_st_ctx xstorm_st_context;
+ struct xstorm_roce_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_roce_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_roce_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_roce_conn_st_ctx mstorm_st_context;
+ struct regpair mstorm_st_padding[2];
+ struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
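roce_conn_context is simply a concatenation of the per-storm state blocks, so the aggregation-context bit fields defined earlier are reached as ordinary nested members. A small sketch under the assumption that the caller already owns a zeroed, CPU-visible context buffer (allocation is outside this header):

/* Illustrative only: mark the connection as present in QM queue 0 and set
 * its physical queue via the nested Xstorm aggregation context.
 */
static void sketch_init_roce_ctx(struct roce_conn_context *ctx, u16 pq_id)
{
	memset(ctx, 0, sizeof(*ctx));
	SET_FIELD(ctx->xstorm_ag_context.flags0,
		  XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0, 1);
	ctx->xstorm_ag_context.physical_q0 = cpu_to_le16(pq_id);
}
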
+/* roce cqes statistics */
+struct roce_cqe_stats {
+ __le32 req_cqe_error;
+ __le32 req_remote_access_errors;
+ __le32 req_remote_invalid_request;
+ __le32 resp_cqe_error;
+ __le32 resp_local_length_error;
+ __le32 reserved;
+};
+
+/* roce create qp requester ramrod data */
+struct roce_create_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 orq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ __le32 ack_timeout_val;
+ __le32 initial_psn;
+ __le16 mtu;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 low_latency_phy_queue;
+ struct regpair sq_pbl_addr;
+ struct regpair orq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ __le32 cq_cid;
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ u8 stats_counter_id;
+ u8 vf_id;
+ u8 vport_id;
+ u8 flags2;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3
+ u8 name_space;
+ u8 reserved3[3];
+ __le16 regular_latency_phy_queue;
+ __le16 dpi;
+};
+
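The 16-bit flags word of roce_create_qp_req_ramrod_data mixes single-bit enables with multi-bit counters (error retry count, RNR NAK count), so it is conveniently built in a CPU-order local and converted once. A sketch with placeholder values, again relying on the generic SET_FIELD() helper:

/* Sketch only: compose the requester create-QP flags word; the retry and
 * RNR counts are caller-supplied placeholders, not driver defaults.
 */
static __le16 sketch_create_qp_req_flags(bool signaled, u8 retry_cnt, u8 rnr_cnt)
{
	u16 flags = 0;

	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
		  signaled ? 1 : 0);
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, retry_cnt);
	SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, rnr_cnt);

	return cpu_to_le16(flags);
}
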
+/* roce create qp responder ramrod data */
+struct roce_create_qp_resp_ramrod_data {
+ __le32 flags;
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19
+ __le16 xrc_domain;
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 irq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ u8 stats_counter_id;
+ u8 reserved1;
+ __le16 mtu;
+ __le32 initial_psn;
+ __le16 pd;
+ __le16 rq_num_pages;
+ struct rdma_srq_id srq_id;
+ struct regpair rq_pbl_addr;
+ struct regpair irq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ __le16 low_latency_phy_queue;
+ u8 vf_id;
+ u8 vport_id;
+ __le32 cq_cid;
+ __le16 regular_latency_phy_queue;
+ __le16 dpi;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved3[3];
+};
+
+/* RoCE Create Suspended QP requester runtime ramrod data */
+struct roce_create_suspended_qp_req_runtime_ramrod_data {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \
+ 0x7FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+};
+
+/* RoCE Create Suspended QP requester ramrod data */
+struct roce_create_suspended_qp_req_ramrod_data {
+ struct roce_create_qp_req_ramrod_data qp_params;
+ struct roce_create_suspended_qp_req_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE Create Suspended QP responder runtime params */
+struct roce_create_suspended_qp_resp_runtime_params {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+ __le32 reserved;
+};
+
+/* RoCE RDB array entry */
+struct roce_resp_qp_rdb_entry {
+ struct regpair atomic_data;
+ struct regpair va;
+ __le32 psn;
+ __le32 rkey;
+ __le32 byte_count;
+ u8 op_type;
+ u8 reserved[3];
+};
+
+/* RoCE Create Suspended QP responder runtime ramrod data */
+struct roce_create_suspended_qp_resp_runtime_ramrod_data {
+ struct roce_create_suspended_qp_resp_runtime_params params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Create Suspended QP responder ramrod data */
+struct roce_create_suspended_qp_resp_ramrod_data {
+ struct roce_create_qp_resp_ramrod_data
+ qp_params;
+ struct roce_create_suspended_qp_resp_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE create ud qp ramrod data */
+struct roce_create_ud_qp_ramrod_data {
+ __le16 local_mac_addr[3];
+ __le16 vlan_id;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved[3];
+};
+
+/* roce DCQCN received statistics */
+struct roce_dcqcn_received_stats {
+ struct regpair ecn_pkt_rcv;
+ struct regpair cnp_pkt_rcv;
+ struct regpair cnp_pkt_reject;
+};
+
+/* roce DCQCN sent statistics */
+struct roce_dcqcn_sent_stats {
+ struct regpair cnp_pkt_sent;
+};
+
+/* RoCE destroy qp requester output params */
+struct roce_destroy_qp_req_output_params {
+ __le32 cq_prod;
+ __le32 reserved;
+};
+
+/* RoCE destroy qp requester ramrod data */
+struct roce_destroy_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE destroy qp responder output params */
+struct roce_destroy_qp_resp_output_params {
+ __le32 cq_prod;
+ __le32 reserved;
+};
+
+/* RoCE destroy qp responder ramrod data */
+struct roce_destroy_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+ __le32 src_qp_id;
+ __le32 reserved;
+};
+
+/* RoCE destroy ud qp ramrod data */
+struct roce_destroy_ud_qp_ramrod_data {
+ __le32 src_qp_id;
+ __le32 reserved;
+};
+
+/* roce error statistics */
+struct roce_error_stats {
+ __le32 resp_remote_access_errors;
+ __le32 reserved;
+};
+
+/* roce special events statistics */
+struct roce_events_stats {
+ __le32 silent_drops;
+ __le32 rnr_naks_sent;
+ __le32 retransmit_count;
+ __le32 icrc_error_count;
+ __le32 implied_nak_seq_err;
+ __le32 duplicate_request;
+ __le32 local_ack_timeout_err;
+ __le32 out_of_sequence;
+ __le32 packet_seq_err;
+ __le32 rnr_nak_retry_err;
+};
+
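The statistics structures in this block are filled by firmware in little-endian form, so a host-side reader converts each counter explicitly. A trivial sketch (the accessor name is hypothetical):

/* Sketch only: pull one counter out of a firmware-filled stats copy. */
static u32 sketch_roce_icrc_errors(const struct roce_events_stats *stats)
{
	return le32_to_cpu(stats->icrc_error_count);
}
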
+/* roce slow path EQ cmd IDs */
+enum roce_event_opcode {
+ ROCE_EVENT_CREATE_QP = 13,
+ ROCE_EVENT_MODIFY_QP,
+ ROCE_EVENT_QUERY_QP,
+ ROCE_EVENT_DESTROY_QP,
+ ROCE_EVENT_CREATE_UD_QP,
+ ROCE_EVENT_DESTROY_UD_QP,
+ ROCE_EVENT_FUNC_UPDATE,
+ ROCE_EVENT_SUSPEND_QP,
+ ROCE_EVENT_QUERY_SUSPENDED_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_QP,
+ ROCE_EVENT_RESUME_QP,
+ ROCE_EVENT_SUSPEND_UD_QP,
+ ROCE_EVENT_RESUME_UD_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_UD_QP,
+ ROCE_EVENT_FLUSH_DPT_QP,
+ MAX_ROCE_EVENT_OPCODE
+};
+
+/* roce func init parameters */
+struct roce_init_func_params {
+ u8 ll2_queue_id;
+ u8 cnp_vlan_priority;
+ u8 cnp_dscp;
+ u8 flags;
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1
+#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1
+#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F
+#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2
+ __le32 cnp_send_timeout;
+ __le16 rl_offset;
+ u8 rl_count_log;
+ u8 reserved1[5];
+};
+
+/* roce func init ramrod data */
+struct roce_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+ struct roce_init_func_params roce;
+};
+
+/* roce_ll2_cqe_data */
+struct roce_ll2_cqe_data {
+ u8 name_space;
+ u8 flags;
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0
+#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F
+#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1
+ u8 reserved1[2];
+ __le32 cid;
+};
+
+/* roce modify qp requester ramrod data */
+struct roce_modify_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15
+ u8 fields;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 ack_timeout_val;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 reserved3[2];
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+/* roce modify qp responder ramrod data */
+struct roce_modify_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12
+ u8 fields;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le16 mtu;
+ __le16 low_latency_phy_queue;
+ __le16 regular_latency_phy_queue;
+ u8 reserved2[6];
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+/* RoCE query qp requester output params */
+struct roce_query_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+};
+
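Output-parameter blocks such as roce_query_qp_req_output_params are written back by firmware to the buffer referenced by output_params_addr; the host then decodes the packed flags with GET_FIELD(). A sketch, assuming the completed buffer is already coherent and CPU-visible:

/* Sketch only: check whether the requester reported its SQ as draining. */
static bool sketch_qp_req_sq_draining(const struct roce_query_qp_req_output_params *out)
{
	u32 flags = le32_to_cpu(out->flags);

	return GET_FIELD(flags, ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
}
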
+/* RoCE query qp requester ramrod data */
+struct roce_query_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE query qp responder output params */
+struct roce_query_qp_resp_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+};
+
+/* RoCE query qp responder ramrod data */
+struct roce_query_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE Query Suspended QP requester output params */
+struct roce_query_suspended_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+ __le32 reserved;
+};
+
+/* RoCE Query Suspended QP requester ramrod data */
+struct roce_query_suspended_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE Query Suspended QP responder runtime params */
+struct roce_query_suspended_qp_resp_runtime_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+};
+
+/* RoCE Query Suspended QP responder output params */
+struct roce_query_suspended_qp_resp_output_params {
+ struct roce_query_suspended_qp_resp_runtime_params runtime_params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Query Suspended QP responder ramrod data */
+struct roce_query_suspended_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* ROCE ramrod command IDs */
+enum roce_ramrod_cmd_id {
+ ROCE_RAMROD_CREATE_QP = 13,
+ ROCE_RAMROD_MODIFY_QP,
+ ROCE_RAMROD_QUERY_QP,
+ ROCE_RAMROD_DESTROY_QP,
+ ROCE_RAMROD_CREATE_UD_QP,
+ ROCE_RAMROD_DESTROY_UD_QP,
+ ROCE_RAMROD_FUNC_UPDATE,
+ ROCE_RAMROD_SUSPEND_QP,
+ ROCE_RAMROD_QUERY_SUSPENDED_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_QP,
+ ROCE_RAMROD_RESUME_QP,
+ ROCE_RAMROD_SUSPEND_UD_QP,
+ ROCE_RAMROD_RESUME_UD_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_UD_QP,
+ ROCE_RAMROD_FLUSH_DPT_QP,
+ MAX_ROCE_RAMROD_CMD_ID
+};
+
+/* ROCE RDB array entry type */
+enum roce_resp_qp_rdb_entry_type {
+ ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0,
+ ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1,
+ ROCE_QP_RDB_ENTRY_INVALID = 2,
+ MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE
+};
+
+/* RoCE func update parameters */
+struct roce_update_func_params {
+ u8 cnp_vlan_priority;
+ u8 cnp_dscp;
+ __le16 flags;
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1
+#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1
+#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF
+#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2
+ __le32 cnp_send_timeout;
+};
+
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
+struct mstorm_roce_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct tstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 dif_rxmit_cnt;
+ __le32 snd_nxt_psn;
+ __le32 snd_max_psn;
+ __le32 orq_prod;
+ __le32 reg4;
+ __le32 dif_acked_cnt;
+ __le32 dif_cnt;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_cqe_error_type;
+ u8 orq_cache_idx;
+ __le16 snd_sq_cons_th;
+ u8 byte4;
+ u8 byte5;
+ __le16 snd_sq_cons;
+ __le16 conn_dpi;
+ __le16 force_comp_cons;
+ __le32 dif_rxmit_acked_cnt;
+ __le32 reg10;
+};
+
+struct tstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 psn_and_rxmit_id_echo;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_async_error_type;
+ u8 byte3;
+ __le16 rq_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rq_prod;
+ __le16 conn_dpi;
+ __le16 irq_cons;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct ustorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ustorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct xstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 sq_cmp_cons;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 dif_error_first_sq_cons;
+ __le16 conn_dpi;
+ u8 dif_error_sge_index;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 lsn;
+ __le32 ssn;
+ __le32 snd_una_psn;
+ __le32 snd_nxt_psn;
+ __le32 dif_error_offset;
+ __le32 orq_cons_th;
+ __le32 orq_cons;
+};
+
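The Xstorm requester context above carries what appear to be the sender-side PSN trackers, snd_una_psn and snd_nxt_psn (by analogy with TCP's SND.UNA/SND.NXT), alongside the little-endian SQ producer/consumer indices. As a small illustration of how the two PSN fields relate, the sketch below computes the number of PSNs still in flight using the 24-bit PSN arithmetic of the InfiniBand/RoCE specification; the helper is hypothetical, and real code would first convert the __le32 fields with le32_to_cpu().

#include <stdint.h>

#define ROCE_PSN_MASK 0xffffffu	/* PSNs are 24 bits wide per the IB/RoCE spec */

/* Hypothetical helper: PSNs sent but not yet acknowledged. */
static inline uint32_t roce_psns_in_flight(uint32_t snd_una_psn,
					   uint32_t snd_nxt_psn)
{
	return (snd_nxt_psn - snd_una_psn) & ROCE_PSN_MASK;
}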
+struct xstorm_roce_resp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 irq_prod_shadow;
+ __le16 word2;
+ __le16 irq_cons;
+ __le16 irq_prod;
+ __le16 e5_reserved1;
+ __le16 conn_dpi;
+ u8 rxmit_opcode;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 rxmit_psn_and_id;
+ __le32 rxmit_bytes_length;
+ __le32 psn;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 msn_and_syndrome;
+};
+
+struct ystorm_roce_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct ystorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct ystorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/* RoCE doorbell data */
+enum roce_flavor {
+ PLAIN_ROCE,
+ RROCE_IPV4,
+ RROCE_IPV6,
+ MAX_ROCE_FLAVOR
+};
+
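roce_flavor selects the wire encapsulation of a connection: PLAIN_ROCE is conventionally RoCE v1 (Ethertype-based, non-routable), while RROCE_IPV4 and RROCE_IPV6 denote routable RoCE (RoCE v2) over UDP/IPv4 and UDP/IPv6. A minimal sketch of how a caller might choose a flavor follows; the enum is re-declared only to keep the snippet self-contained, and the helper with its parameters is hypothetical.

/* Re-declared from the header above, only so this snippet stands alone. */
enum roce_flavor {
	PLAIN_ROCE,
	RROCE_IPV4,
	RROCE_IPV6,
	MAX_ROCE_FLAVOR
};

/* Hypothetical helper mapping a connection's transport to a RoCE flavor. */
enum roce_flavor roce_pick_flavor(int is_roce_v2, int is_ipv6)
{
	if (!is_roce_v2)
		return PLAIN_ROCE;
	return is_ipv6 ? RROCE_IPV6 : RROCE_IPV4;
}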
+/* The iWARP storm context of Ystorm */
+struct ystorm_iwarp_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+/* The iWARP storm context of Pstorm */
+struct pstorm_iwarp_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+/* The iWARP storm context of Xstorm */
+struct xstorm_iwarp_conn_st_ctx {
+ __le32 reserved[48];
+};
+
+struct xstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+ u8 flags2;
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 sq_comp_cons;
+ __le16 sq_tx_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 rewinded_snd_max_or_term_opcode;
+ __le32 rd_msn;
+ __le16 irq_prod_via_msdm;
+ __le16 irq_cons;
+ __le16 hq_cons_th_or_mpa_data;
+ __le16 hq_cons;
+ __le32 atom_msn;
+ __le32 orq_cons;
+ __le32 orq_cons_th;
+ u8 byte7;
+ u8 wqe_data_pad_bytes;
+ u8 max_ord;
+ u8 former_hq_prod;
+ u8 irq_prod_via_msem;
+ u8 byte12;
+ u8 max_pkt_pdu_size_lo;
+ u8 max_pkt_pdu_size_hi;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 e5_reserved4;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 shared_queue_page_addr_lo;
+ __le32 shared_queue_page_addr_hi;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
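Several fields of the Xstorm iWARP context above are split into lo/hi halves (max_pkt_pdu_size_lo/_hi as two bytes, shared_queue_page_addr_lo/_hi as two 32-bit words), presumably because the value is wider than the context slot it occupies. The helpers below show the straightforward reassembly; they are illustrative only, and real code would convert the __le32 halves with le32_to_cpu() first.

#include <stdint.h>

/* Hypothetical reassembly helpers for split lo/hi context fields. */
static inline uint16_t iwarp_max_pkt_pdu_size(uint8_t lo, uint8_t hi)
{
	return (uint16_t)(((uint16_t)hi << 8) | lo);
}

static inline uint64_t iwarp_shared_queue_page_addr(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}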
+struct tstorm_iwarp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 unaligned_nxt_seq;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 orq_cache_idx;
+ u8 hq_prod;
+ __le16 sq_tx_cons_th;
+ u8 orq_prod;
+ u8 irq_cons;
+ __le16 sq_tx_cons;
+ __le16 conn_dpi;
+ __le16 rq_prod;
+ __le32 snd_seq;
+ __le32 last_hq_sequence;
+};
+
+/* The iwarp storm context of Tstorm */
+struct tstorm_iwarp_conn_st_ctx {
+ __le32 reserved[60];
+};
+
+/* The iwarp storm context of Mstorm */
+struct mstorm_iwarp_conn_st_ctx {
+ __le32 reserved[32];
+};
+
+/* The iwarp storm context of Ustorm */
+struct ustorm_iwarp_conn_st_ctx {
+ struct regpair reserved[14];
+};
+
+/* iwarp connection context */
+struct iwarp_conn_context {
+ struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iwarp_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
+/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */
+struct iwarp_create_qp_ramrod_data {
+ u8 flags;
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_SHIFT 6
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x1
+#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 7
+ u8 reserved1;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 rq_num_pages;
+ __le32 reserved3[2];
+ struct regpair qp_handle_for_cqe;
+ struct rdma_srq_id srq_id;
+ __le32 cq_cid_for_sq;
+ __le32 cq_cid_for_rq;
+ __le16 dpi;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 reserved2[6];
+};
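The flags byte above is intended to be packed with the driver's generic SET_FIELD() helper rather than by open-coded shifting, and the multi-byte members are little-endian on the wire. A minimal sketch follows, assuming SET_FIELD() from include/linux/qed/qed_if.h, a zero-initialized ramrod buffer, and purely illustrative values; the helper name is hypothetical.

/* Sketch: pack QP capabilities into iwarp_create_qp_ramrod_data.
 * Assumes SET_FIELD() from include/linux/qed/qed_if.h; values are examples.
 */
static void example_fill_create_qp(struct iwarp_create_qp_ramrod_data *p_ramrod)
{
	p_ramrod->flags = 0;
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, 1);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, 1);
	SET_FIELD(p_ramrod->flags,
		  IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, 0);

	/* __le16 members take host values through cpu_to_le16() */
	p_ramrod->pd = cpu_to_le16(0x10);	/* example PD id */
	p_ramrod->sq_num_pages = cpu_to_le16(4);
	p_ramrod->rq_num_pages = cpu_to_le16(4);
}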
+
+/* iWARP asynchronous EQE opcodes */
+enum iwarp_eqe_async_opcode {
+ IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED,
+ IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE,
+ IWARP_EVENT_TYPE_ASYNC_CID_CLEANED,
+ IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
+ IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
+ IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
+ MAX_IWARP_EQE_ASYNC_OPCODE
+};
+
+struct iwarp_eqe_data_mpa_async_completion {
+ __le16 ulp_data_len;
+ u8 rtr_type_sent;
+ u8 reserved[5];
+};
+
+struct iwarp_eqe_data_tcp_async_completion {
+ __le16 ulp_data_len;
+ u8 mpa_handshake_mode;
+ u8 reserved[5];
+};
+
+/* iWARP synchronous EQE (ramrod completion) opcodes */
+enum iwarp_eqe_sync_opcode {
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 13,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD,
+ IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
+ IWARP_EVENT_TYPE_CREATE_QP,
+ IWARP_EVENT_TYPE_QUERY_QP,
+ IWARP_EVENT_TYPE_MODIFY_QP,
+ IWARP_EVENT_TYPE_DESTROY_QP,
+ IWARP_EVENT_TYPE_ABORT_TCP_OFFLOAD,
+ MAX_IWARP_EQE_SYNC_OPCODE
+};
+
+/* iWARP EQE completion status */
+enum iwarp_fw_return_code {
+ IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 6,
+ IWARP_CONN_ERROR_TCP_CONNECTION_RST,
+ IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_ERROR_REJECT,
+ IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER,
+ IWARP_CONN_ERROR_MPA_RST,
+ IWARP_CONN_ERROR_MPA_FIN,
+ IWARP_CONN_ERROR_MPA_RTR_MISMATCH,
+ IWARP_CONN_ERROR_MPA_INSUF_IRD,
+ IWARP_CONN_ERROR_MPA_INVALID_PACKET,
+ IWARP_CONN_ERROR_MPA_LOCAL_ERROR,
+ IWARP_CONN_ERROR_MPA_TIMEOUT,
+ IWARP_CONN_ERROR_MPA_TERMINATE,
+ IWARP_QP_IN_ERROR_GOOD_CLOSE,
+ IWARP_QP_IN_ERROR_BAD_CLOSE,
+ IWARP_EXCEPTION_DETECTED_LLP_CLOSED,
+ IWARP_EXCEPTION_DETECTED_LLP_RESET,
+ IWARP_EXCEPTION_DETECTED_IRQ_FULL,
+ IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
+ IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
+ IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
+ IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
+ IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC,
+ IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR,
+ IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR,
+ IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED,
+ MAX_IWARP_FW_RETURN_CODE
+};
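The enum mixes TCP/MPA connection-setup failures, QP teardown results, and runtime exceptions, so event handlers typically decode it before reporting upward. An illustrative decoder for a few of the values (not the driver's own reporting path, and the helper name is hypothetical):

/* Illustrative decoder for a handful of iwarp_fw_return_code values. */
static const char *example_iwarp_fw_rc_str(enum iwarp_fw_return_code rc)
{
	switch (rc) {
	case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
		return "TCP connect timed out";
	case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
		return "unsupported MPA revision";
	case IWARP_QP_IN_ERROR_GOOD_CLOSE:
		return "QP moved to error after graceful close";
	case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
		return "CQ overflow";
	default:
		return "unhandled iWARP FW return code";
	}
}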
+
+/* iWARP function init parameters */
+struct iwarp_init_func_params {
+ u8 ll2_ooo_q_index;
+ u8 reserved1[7];
+};
+
+/* iwarp func init ramrod data */
+struct iwarp_init_func_ramrod_data {
+ struct rdma_init_func_ramrod_data rdma;
+ struct tcp_init_params tcp;
+ struct iwarp_init_func_params iwarp;
+};
+
+/* iWARP QP - possible states to transition to */
+enum iwarp_modify_qp_new_state_type {
+ IWARP_MODIFY_QP_STATE_CLOSING = 1,
+ IWARP_MODIFY_QP_STATE_ERROR = 2,
+ MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE
+};
+
+/* iwarp modify qp ramrod data */
+struct iwarp_modify_qp_ramrod_data {
+ __le16 transition_to_state;
+ __le16 flags;
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4
+#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
+#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 5
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x3FF
+#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 6
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le32 reserved1[10];
+};
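Only CLOSING and ERROR are valid targets (see the enum above), and the state write is honored only when STATE_TRANS_EN is set in flags. A sketch, assuming SET_FIELD() from include/linux/qed/qed_if.h, the usual cpu_to_le16() conversion, and a pre-zeroed ramrod buffer; the helper name is hypothetical.

/* Sketch: request a transition of an iWARP QP to the ERROR state. */
static void example_modify_qp_to_error(struct iwarp_modify_qp_ramrod_data *p_ramrod)
{
	u16 flags = 0;

	SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 1);
	p_ramrod->flags = cpu_to_le16(flags);
	p_ramrod->transition_to_state =
		cpu_to_le16(IWARP_MODIFY_QP_STATE_ERROR);
}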
+
+/* MPA params for Enhanced mode */
+struct mpa_rq_params {
+ __le32 ird;
+ __le32 ord;
+};
+
+/* MPA host Address-Len for private data */
+struct mpa_ulp_buffer {
+ struct regpair addr;
+ __le16 len;
+ __le16 reserved[3];
+};
+
+/* iWARP MPA offload params common to Basic and Enhanced modes */
+struct mpa_outgoing_params {
+ u8 crc_needed;
+ u8 reject;
+ u8 reserved[6];
+ struct mpa_rq_params out_rq;
+ struct mpa_ulp_buffer outgoing_ulp_buffer;
+};
+
+/* iWARP MPA offload params passed by driver to FW in MPA Offload Request
+ * Ramrod.
+ */
+struct iwarp_mpa_offload_ramrod_data {
+ struct mpa_outgoing_params common;
+ __le32 tcp_cid;
+ u8 mode;
+ u8 tcp_connect_side;
+ u8 rtr_pref;
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F
+#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3
+ u8 reserved2;
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ struct regpair shared_queue_addr;
+ __le32 additional_setup_time;
+ __le16 rcv_wnd;
+ u8 stats_counter_id;
+ u8 reserved3[9];
+};
+
+/* iWARP TCP connection offload params passed by driver to FW */
+struct iwarp_offload_params {
+ struct mpa_ulp_buffer incoming_ulp_buffer;
+ struct regpair async_eqe_output_buf;
+ struct regpair handle_for_async;
+ __le32 additional_setup_time;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ u8 stats_counter_id;
+ u8 mpa_mode;
+ u8 src_vport_id;
+ u8 reserved[5];
+};
+
+/* iWARP query QP output params */
+struct iwarp_query_qp_output_params {
+ __le32 flags;
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ u8 reserved1[4];
+};
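The FW writes this block to the DMA buffer whose address is carried in the query ramrod (defined just below), so the driver reads it back after the completion; the only defined bit is ERROR_FLG. A read-side sketch, assuming GET_FIELD() from include/linux/qed/qed_if.h; the helper name is hypothetical.

/* Sketch: check the error flag after a query-QP completion. */
static bool example_query_qp_in_error(const struct iwarp_query_qp_output_params *out)
{
	u32 flags = le32_to_cpu(out->flags);

	return GET_FIELD(flags, IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG);
}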
+
+/* iWARP query QP ramrod data */
+struct iwarp_query_qp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* iWARP Ramrod Command IDs */
+enum iwarp_ramrod_cmd_id {
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+ IWARP_RAMROD_CMD_ID_CREATE_QP,
+ IWARP_RAMROD_CMD_ID_QUERY_QP,
+ IWARP_RAMROD_CMD_ID_MODIFY_QP,
+ IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD,
+ MAX_IWARP_RAMROD_CMD_ID
+};
+
+/* Per PF iWARP retransmit path statistics */
+struct iwarp_rxmit_stats_drv {
+ struct regpair tx_go_to_slow_start_event_cnt;
+ struct regpair tx_fast_retransmit_event_cnt;
+};
+
+/* iWARP and TCP connection offload params passed by driver to FW in iWARP
+ * offload ramrod.
+ */
+struct iwarp_tcp_offload_ramrod_data {
+ struct tcp_offload_params_opt2 tcp;
+ struct iwarp_offload_params iwarp;
+};
+
+/* iWARP MPA negotiation types */
+enum mpa_negotiation_mode {
+ MPA_NEGOTIATION_TYPE_BASIC = 1,
+ MPA_NEGOTIATION_TYPE_ENHANCED = 2,
+ MAX_MPA_NEGOTIATION_MODE
+};
+
+/* iWARP MPA Enhanced mode RTR types */
+enum mpa_rtr_type {
+ MPA_RTR_TYPE_NONE = 0,
+ MPA_RTR_TYPE_ZERO_SEND = 1,
+ MPA_RTR_TYPE_ZERO_WRITE = 2,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3,
+ MPA_RTR_TYPE_ZERO_READ = 4,
+ MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5,
+ MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6,
+ MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7,
+ MAX_MPA_RTR_TYPE
+};
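The RTR type values form a 3-bit bitmask (zero-send = 1, zero-write = 2, zero-read = 4), which is why iwarp_mpa_offload_ramrod_data.rtr_pref above reserves exactly three bits for RTR_SUPPORTED. A small sketch composing the mask from individual capabilities; the helper name is hypothetical.

/* Sketch: compose an MPA RTR preference bitmask from the zero-length
 * operation capabilities (send = 1, write = 2, read = 4).
 */
static u8 example_mpa_rtr_mask(bool zero_send, bool zero_write, bool zero_read)
{
	u8 rtr = MPA_RTR_TYPE_NONE;

	if (zero_send)
		rtr |= MPA_RTR_TYPE_ZERO_SEND;
	if (zero_write)
		rtr |= MPA_RTR_TYPE_ZERO_WRITE;
	if (zero_read)
		rtr |= MPA_RTR_TYPE_ZERO_READ;

	/* all three set -> MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ */
	return rtr;
}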
+
+/* unaligned opaque data received from LL2 */
+struct unaligned_opaque_data {
+ __le16 first_mpa_offset;
+ u8 tcp_payload_offset;
+ u8 flags;
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1
+#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1
+#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F
+#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2
+ __le32 cid;
+};
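The FW prepends this cookie to unaligned MPA packets delivered over LL2, so the receive path unpacks it before reassembly. A minimal read-side sketch, assuming GET_FIELD() from include/linux/qed/qed_if.h; the helper name is hypothetical.

/* Sketch: unpack the LL2 opaque cookie preceding an unaligned MPA packet. */
static void example_parse_unaligned_data(const struct unaligned_opaque_data *cookie)
{
	u16 first_mpa_offset = le16_to_cpu(cookie->first_mpa_offset);
	u32 cid = le32_to_cpu(cookie->cid);
	bool conn_closed =
		GET_FIELD(cookie->flags,
			  UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED);

	pr_debug("cid %u, mpa offset %u, closed %d\n",
		 cid, first_mpa_offset, conn_closed);
}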
+
+struct mstorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 rcq_cons;
+ __le16 rcq_cons_th;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ustorm_iwarp_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ystorm_iwarp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/* The fcoe storm context of Ystorm */
+struct ystorm_fcoe_conn_st_ctx {
+ u8 func_mode;
+ u8 cos;
+ u8 conf_version;
+ u8 eth_hdr_size;
+ __le16 stat_ram_addr;
+ __le16 mtu;
+ __le16 max_fc_payload_len;
+ __le16 tx_max_fc_pay_len;
+ u8 fcp_cmd_size;
+ u8 fcp_rsp_size;
+ __le16 mss;
+ struct regpair reserved;
+ __le16 min_frame_size;
+ u8 protection_info_flags;
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2
+ u8 dst_protection_per_mss;
+ u8 src_protection_per_mss;
+ u8 ptu_log_page_size;
+ u8 flags;
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F
+#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2
+ u8 fcp_xfer_size;
+};
+
+/* FCoE 16-bit VLAN structure */
+struct fcoe_vlan_fields {
+ __le16 fields;
+#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI_MASK 0x1
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI_MASK 0x7
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
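The three subfields mirror a standard 802.1Q tag (12-bit VID, CFI bit, 3-bit priority), so they are normally unpacked with GET_FIELD() after converting the little-endian halfword. A read-side sketch, assuming GET_FIELD() from include/linux/qed/qed_if.h; the helper name is hypothetical.

/* Sketch: unpack an FCoE VLAN tag into VID and priority. */
static void example_parse_fcoe_vlan(const struct fcoe_vlan_fields *vlan,
				    u16 *vid, u8 *pri)
{
	u16 fields = le16_to_cpu(vlan->fields);

	*vid = GET_FIELD(fields, FCOE_VLAN_FIELDS_VID);
	*pri = GET_FIELD(fields, FCOE_VLAN_FIELDS_PRI);
}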
+
+/* FCoE 16-bit VLAN union */
+union fcoe_vlan_field_union {
+ struct fcoe_vlan_fields fields;
+ __le16 val;
+};
+
+/* FCoE 16-bit VLAN/VIF union */
+union fcoe_vlan_vif_field_union {
+ union fcoe_vlan_field_union vlan;
+ __le16 vif;
+};
+
+/* Ethernet context section */
+struct pstorm_fcoe_eth_context_section {
+ u8 remote_addr_3;
+ u8 remote_addr_2;
+ u8 remote_addr_1;
+ u8 remote_addr_0;
+ u8 local_addr_1;
+ u8 local_addr_0;
+ u8 remote_addr_5;
+ u8 remote_addr_4;
+ u8 local_addr_5;
+ u8 local_addr_4;
+ u8 local_addr_3;
+ u8 local_addr_2;
+ union fcoe_vlan_vif_field_union vif_outer_vlan;
+ __le16 vif_outer_eth_type;
+ union fcoe_vlan_vif_field_union inner_vlan;
+ __le16 inner_eth_type;
+};
+
+/* The fcoe storm context of Pstorm */
+struct pstorm_fcoe_conn_st_ctx {
+ u8 func_mode;
+ u8 cos;
+ u8 conf_version;
+ u8 rsrv;
+ __le16 stat_ram_addr;
+ __le16 mss;
+ struct regpair abts_cleanup_addr;
+ struct pstorm_fcoe_eth_context_section eth;
+ u8 sid_2;
+ u8 sid_1;
+ u8 sid_0;
+ u8 flags;
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1
+#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7
+#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5
+ u8 did_2;
+ u8 did_1;
+ u8 did_0;
+ u8 src_mac_index;
+ __le16 rec_rr_tov_val;
+ u8 q_relative_offset;
+ u8 reserved1;
+};
+
+/* The fcoe storm context of Xstorm */
+struct xstorm_fcoe_conn_st_ctx {
+ u8 func_mode;
+ u8 src_mac_index;
+ u8 conf_version;
+ u8 cached_wqes_avail;
+ __le16 stat_ram_addr;
+ u8 flags;
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3
+#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7
+#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5
+ u8 cached_wqes_offset;
+ u8 reserved2;
+ u8 eth_hdr_size;
+ u8 seq_id;
+ u8 max_conc_seqs;
+ __le16 num_pages_in_pbl;
+ __le16 reserved;
+ struct regpair sq_pbl_addr;
+ struct regpair sq_curr_page_addr;
+ struct regpair sq_next_page_addr;
+ struct regpair xferq_pbl_addr;
+ struct regpair xferq_curr_page_addr;
+ struct regpair xferq_next_page_addr;
+ struct regpair respq_pbl_addr;
+ struct regpair respq_curr_page_addr;
+ struct regpair respq_next_page_addr;
+ __le16 mtu;
+ __le16 tx_max_fc_pay_len;
+ __le16 max_fc_payload_len;
+ __le16 min_frame_size;
+ __le16 sq_pbl_next_index;
+ __le16 respq_pbl_next_index;
+ u8 fcp_cmd_byte_credit;
+ u8 fcp_rsp_byte_credit;
+ __le16 protection_info;
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1
+#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF
+#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF
+#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8
+ __le16 xferq_pbl_next_index;
+ __le16 page_size;
+ u8 mid_seq;
+ u8 fcp_xfer_byte_credit;
+ u8 reserved1[2];
+ struct fcoe_wqe cached_wqes[16];
+};
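The regpair members hold DMA addresses of the SQ/XFERQ/RESPQ page-block lists, which qed code conventionally fills through the DMA_REGPAIR_LE() helper rather than by splitting the address by hand. A sketch, assuming DMA_REGPAIR_LE() from include/linux/qed/qed_if.h and caller-allocated dma_addr_t values; the helper name is hypothetical.

/* Sketch: point the Xstorm FCoE context at its queue PBLs. */
static void example_set_fcoe_pbls(struct xstorm_fcoe_conn_st_ctx *ctx,
				  dma_addr_t sq_pbl, dma_addr_t xferq_pbl,
				  dma_addr_t respq_pbl)
{
	DMA_REGPAIR_LE(ctx->sq_pbl_addr, sq_pbl);
	DMA_REGPAIR_LE(ctx->xferq_pbl_addr, xferq_pbl);
	DMA_REGPAIR_LE(ctx->respq_pbl_addr, respq_pbl);
}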
+
+struct xstorm_fcoe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+ u8 flags7;
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+ u8 flags11;
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 xferq_prod;
+ __le16 xferq_cons;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 remain_io;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 respq_prod;
+ __le16 respq_cons;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+};
+
+/* The fcoe storm context of Ustorm */
+struct ustorm_fcoe_conn_st_ctx {
+ struct regpair respq_pbl_addr;
+ __le16 num_pages_in_pbl;
+ u8 ptu_log_page_size;
+ u8 log_page_size;
+ __le16 respq_prod;
+ u8 reserved[2];
+};
+
+struct tstorm_fcoe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct ustorm_fcoe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The fcoe storm context of Tstorm */
+struct tstorm_fcoe_conn_st_ctx {
+ __le16 stat_ram_addr;
+ __le16 rx_max_fc_payload_len;
+ __le16 e_d_tov_val;
+ u8 flags;
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1
+#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2
+ u8 timers_cleanup_invocation_cnt;
+ __le32 reserved1[2];
+ __le32 dst_mac_address_bytes_0_to_3;
+ __le16 dst_mac_address_bytes_4_to_5;
+ __le16 ramrod_echo;
+ u8 flags1;
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3
+#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F
+#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2
+ u8 cq_relative_offset;
+ u8 cmdq_relative_offset;
+ u8 bdq_resource_id;
+ u8 reserved0[4];
+};
+
+struct mstorm_fcoe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* Fast path part of the fcoe storm context of Mstorm */
+struct fcoe_mstorm_fcoe_conn_st_ctx_fp {
+ __le16 xfer_prod;
+ u8 num_cqs;
+ u8 reserved1;
+ u8 protection_info;
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK 0x1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT 1
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK 0x3F
+#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT 2
+ u8 q_relative_offset;
+ u8 reserved2[2];
+};
+
+/* Non-fast-path part of the fcoe storm context of Mstorm */
+struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp {
+ __le16 conn_id;
+ __le16 stat_ram_addr;
+ __le16 num_pages_in_pbl;
+ u8 ptu_log_page_size;
+ u8 log_page_size;
+ __le16 unsolicited_cq_count;
+ __le16 cmdq_count;
+ u8 bdq_resource_id;
+ u8 reserved0[3];
+ struct regpair xferq_pbl_addr;
+ struct regpair reserved1;
+ struct regpair reserved2[3];
+};
+
+/* The fcoe storm context of Mstorm */
+struct mstorm_fcoe_conn_st_ctx {
+ struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp;
+ struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp;
+};
+
+/* fcoe connection context */
+struct fcoe_conn_context {
+ struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
+ struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
+ struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct regpair xstorm_ag_padding[6];
+ struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
+ struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
+};
+
+/* FCoE connection offload params passed by driver to FW in FCoE offload
+ * ramrod.
+ */
+struct fcoe_conn_offload_ramrod_params {
+ struct fcoe_conn_offload_ramrod_data offload_ramrod_data;
+};
+
+/* FCoE connection terminate params passed by driver to FW in FCoE terminate
+ * conn ramrod.
+ */
+struct fcoe_conn_terminate_ramrod_params {
+ struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data;
+};
+
+/* FCoE event type */
+enum fcoe_event_type {
+ FCOE_EVENT_INIT_FUNC,
+ FCOE_EVENT_DESTROY_FUNC,
+ FCOE_EVENT_STAT_FUNC,
+ FCOE_EVENT_OFFLOAD_CONN,
+ FCOE_EVENT_TERMINATE_CONN,
+ FCOE_EVENT_ERROR,
+ MAX_FCOE_EVENT_TYPE
+};
+
+/* FCoE init params passed by driver to FW in FCoE init ramrod */
+struct fcoe_init_ramrod_params {
+ struct fcoe_init_func_ramrod_data init_ramrod_data;
+};
+
+/* FCoE ramrod Command IDs */
+enum fcoe_ramrod_cmd_id {
+ FCOE_RAMROD_CMD_ID_INIT_FUNC,
+ FCOE_RAMROD_CMD_ID_DESTROY_FUNC,
+ FCOE_RAMROD_CMD_ID_STAT_FUNC,
+ FCOE_RAMROD_CMD_ID_OFFLOAD_CONN,
+ FCOE_RAMROD_CMD_ID_TERMINATE_CONN,
+ MAX_FCOE_RAMROD_CMD_ID
+};
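Each command ID is posted to the slow-path queue together with the FCoE protocol ID. A rough sketch of that pairing, assuming the qed_sp_init_request()/qed_spq_post() interface and struct qed_sp_init_data from the driver's qed_sp.h (treat the exact signatures and fields as assumptions); the helper name is hypothetical.

/* Rough sketch of posting an FCoE init-function ramrod. */
static int example_post_fcoe_init(struct qed_hwfn *p_hwfn, u32 cid, u16 opaque_fid)
{
	struct qed_sp_init_data init_data = { 0 };
	struct qed_spq_entry *p_ent;
	int rc;

	init_data.cid = cid;
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

	rc = qed_sp_init_request(p_hwfn, &p_ent,
				 FCOE_RAMROD_CMD_ID_INIT_FUNC,
				 PROTOCOLID_FCOE, &init_data);
	if (rc)
		return rc;

	return qed_spq_post(p_hwfn, p_ent, NULL);
}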
+
+/* FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod.
+ */
+struct fcoe_stat_ramrod_params {
+ struct fcoe_stat_ramrod_data stat_ramrod_data;
+};
+
+struct ystorm_fcoe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/* The iscsi storm connection context of Ystorm */
+struct ystorm_iscsi_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* Combined iSCSI and TCP storm connection context of Pstorm */
+struct pstorm_iscsi_tcp_conn_st_ctx {
+ __le32 tcp[32];
+ __le32 iscsi[4];
+};
+
+/* The combined tcp and iscsi storm context of Xstorm */
+struct xstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_tcp[4];
+ __le32 reserved_iscsi[44];
+};
+
+struct xstorm_iscsi_conn_ag_ctx {
+ u8 cdu_validation;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+ u8 flags6;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 dummy_dorq_var;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 slow_io_total_data_tx_update;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 hq_scan_next_relevant_ack;
+ __le16 r2tq_prod;
+ __le16 r2tq_cons;
+ __le16 hq_prod;
+ __le16 hq_cons;
+ __le32 remain_seq;
+ __le32 bytes_to_next_pdu;
+ __le32 hq_tcp_seq;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 exp_stat_sn;
+ __le32 ongoing_fast_rxmit_seq;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_iscsi_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 rx_tcp_checksum_err_cnt;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 cid_offload_cnt;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The iscsi storm connection context of Tstorm */
+struct tstorm_iscsi_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct mstorm_iscsi_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* Combined iSCSI and TCP storm connection of Mstorm */
+struct mstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_tcp[20];
+ __le32 reserved_iscsi[12];
+};
+
+/* The iscsi storm context of Ustorm */
+struct ustorm_iscsi_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+/* iscsi connection context */
+struct iscsi_conn_context {
+ struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
+ struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct pb_context xpb2_context;
+ struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct pb_context upb_context;
+ struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
+};
+
+/* iSCSI init params passed by driver to FW in iSCSI init ramrod */
+struct iscsi_init_ramrod_params {
+ struct iscsi_spe_func_init iscsi_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+struct ystorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+#endif
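
The *_MASK/*_SHIFT pairs above pack several sub-fields into each flagsN byte of the AG context structures. As a minimal illustrative sketch (not part of the patch), the snippet below sets the one-bit RULE3EN flag and extracts the two-bit PROC_ONLY_CLEANUP field of struct xstorm_iscsi_conn_ag_ctx with plain shift-and-mask arithmetic; the driver itself normally goes through its SET_FIELD()/GET_FIELD() helpers (used in qed_hw.c below), which expand to the same operations.

/* Illustration only: direct use of the _MASK/_SHIFT pairs defined above. */
static void example_ag_ctx_flags(struct xstorm_iscsi_conn_ag_ctx *ctx)
{
	u8 proc_only_cleanup;

	/* Set a 1-bit enable flag in flags11 */
	ctx->flags11 |= XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK <<
			XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT;

	/* Extract a 2-bit field from flags14 */
	proc_only_cleanup = (ctx->flags14 >>
			     XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT) &
			    XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK;
	(void)proc_only_cleanup;
}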
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
new file mode 100644
index 000000000..6263f847b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -0,0 +1,932 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000
+#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10
+
+/* Invalid values */
+#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1))
+
+struct qed_ptt {
+ struct list_head list_entry;
+ unsigned int idx;
+ struct pxp_ptt_entry pxp;
+ u8 hwfn_id;
+};
+
+struct qed_ptt_pool {
+ struct list_head free_list;
+ spinlock_t lock; /* ptt synchronized access */
+ struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
+};
+
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
+ int i;
+
+ if (!p_pool)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_pool->free_list);
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_pool->ptts[i].idx = i;
+ p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
+ p_pool->ptts[i].pxp.pretend.control = 0;
+ p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
+ if (i >= RESERVED_PTT_MAX)
+ list_add(&p_pool->ptts[i].list_entry,
+ &p_pool->free_list);
+ }
+
+ p_hwfn->p_ptt_pool = p_pool;
+ spin_lock_init(&p_pool->lock);
+
+ return 0;
+}
+
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt;
+ int i;
+
+ for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
+ p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
+ p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
+ }
+}
+
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_ptt_pool);
+ p_hwfn->p_ptt_pool = NULL;
+}
+
+struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
+{
+ return qed_ptt_acquire_context(p_hwfn, false);
+}
+
+struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic)
+{
+ struct qed_ptt *p_ptt;
+ unsigned int i, count;
+
+ if (is_atomic)
+ count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT;
+ else
+ count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT;
+
+ /* Take the free PTT from the list */
+ for (i = 0; i < count; i++) {
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
+ p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
+ struct qed_ptt, list_entry);
+ list_del(&p_ptt->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "allocated ptt %d\n", p_ptt->idx);
+ return p_ptt;
+ }
+
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+
+ if (is_atomic)
+ udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY);
+ else
+ usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP,
+ QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2);
+ }
+
+ DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
+ return NULL;
+}
+
+void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
+ list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
+ spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
+}
+
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ /* The HW is using DWORDS and we need to translate it to Bytes */
+ return le32_to_cpu(p_ptt->pxp.offset) << 2;
+}
+
+static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_PF_WINDOW_ADMIN_PER_PF_START +
+ p_ptt->idx * sizeof(struct pxp_ptt_entry);
+}
+
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
+{
+ return PXP_EXTERNAL_BAR_PF_WINDOW_START +
+ p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
+}
+
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 new_hw_addr)
+{
+ u32 prev_hw_addr;
+
+ prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+
+ if (new_hw_addr == prev_hw_addr)
+ return;
+
+ /* Update PTT entry in admin window */
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Updating PTT entry %d to offset 0x%x\n",
+ p_ptt->idx, new_hw_addr);
+
+ /* The HW is using DWORDS and the address is in Bytes */
+ p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, offset),
+ le32_to_cpu(p_ptt->pxp.offset));
+}
+
+static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 hw_addr)
+{
+ u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
+ u32 offset;
+
+ offset = hw_addr - win_hw_addr;
+
+ if (p_ptt->hwfn_id != p_hwfn->my_id)
+ DP_NOTICE(p_hwfn,
+ "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
+ p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);
+
+ /* Verify the address is within the window */
+ if (hw_addr < win_hw_addr ||
+ offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
+ offset = 0;
+ }
+
+ return qed_ptt_get_bar_addr(p_ptt) + offset;
+}
+
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx)
+{
+ if (ptt_idx >= RESERVED_PTT_MAX) {
+ DP_NOTICE(p_hwfn,
+ "Requested PTT %d is out of range\n", ptt_idx);
+ return NULL;
+ }
+
+ return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
+}
+
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr, u32 val)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+
+ REG_WR(p_hwfn, bar_addr, val);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+}
+
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr)
+{
+ u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
+ u32 val = REG_RD(p_hwfn, bar_addr);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
+ bar_addr, hw_addr, val);
+
+ return val;
+}
+
+static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *addr, u32 hw_addr, size_t n, bool to_device)
+{
+ u32 dw_count, *host_addr, hw_offset;
+ size_t quota, done = 0;
+ u32 __iomem *reg_addr;
+
+ while (done < n) {
+ quota = min_t(size_t, n - done,
+ PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
+
+ if (IS_PF(p_hwfn->cdev)) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
+
+ dw_count = quota / 4;
+ host_addr = (u32 *)((u8 *)addr + done);
+ reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
+ if (to_device)
+ while (dw_count--)
+ DIRECT_REG_WR(reg_addr++, *host_addr++);
+ else
+ while (dw_count--)
+ *host_addr++ = DIRECT_REG_RD(reg_addr++);
+
+ done += quota;
+ }
+}
+
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
+ hw_addr, dest, hw_addr, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
+}
+
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
+{
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
+ hw_addr, hw_addr, src, (unsigned long)n);
+
+ qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
+}
+
+void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+ /* Every pretend undoes previous pretends, including
+ * previous port pretend.
+ */
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 port_id)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 port_id, u16 fid)
+{
+ u16 control = 0;
+
+ SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+ SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+ SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+ if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+ fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+ p_ptt->pxp.pretend.control = cpu_to_le16(control);
+ p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+ REG_WR(p_hwfn,
+ qed_ptt_config_addr(p_ptt) +
+ offsetof(struct pxp_ptt_entry, pretend),
+ *(u32 *)&p_ptt->pxp.pretend);
+}
+
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
+/* DMAE */
+#define QED_DMAE_FLAGS_IS_SET(params, flag) \
+ ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
+
+static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
+ const u8 is_src_type_grc,
+ const u8 is_dst_type_grc,
+ struct qed_dmae_params *p_params)
+{
+ u8 src_pfid, dst_pfid, port_id;
+ u16 opcode_b = 0;
+ u32 opcode = 0;
+
+ /* Whether the source is the PCIe or the GRC.
+ * 0- The source is the PCIe
+ * 1- The source is the GRC.
+ */
+ SET_FIELD(opcode, DMAE_CMD_SRC,
+ (is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
+ src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
+ p_params->src_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
+
+ /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
+ SET_FIELD(opcode, DMAE_CMD_DST,
+ (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
+ dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
+ p_params->dst_pfid : p_hwfn->rel_pf_id;
+ SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
+
+
+ /* Whether to write a completion word to the completion destination:
+ * 0-Do not write a completion word
+ * 1-Write the completion word
+ */
+ SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
+
+ if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
+ SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
+
+ /* swapping mode 3 - big endian */
+ SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
+
+ port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
+ p_params->port_id : p_hwfn->port_id;
+ SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
+
+ /* reset source address in next go */
+ SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
+
+ /* reset dest address in next go */
+ SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
+
+ /* SRC/DST VFID: all 1's - pf, otherwise VF id */
+ if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
+ } else {
+ SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
+ }
+ if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
+ SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
+ } else {
+ SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
+ }
+
+ p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
+}
+
+u32 qed_dmae_idx_to_go_cmd(u8 idx)
+{
+ /* All the DMAE 'go' registers form an array in internal memory */
+ return DMAE_REG_GO_C0 + (idx << 2);
+}
+
+static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
+ u8 idx_cmd = p_hwfn->dmae_info.channel, i;
+ int qed_status = 0;
+
+ /* verify address is not NULL */
+ if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
+ ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
+ DP_NOTICE(p_hwfn,
+ "source or destination address 0 idx_cmd=%d\n"
+ "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
+
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
+ idx_cmd,
+ le32_to_cpu(p_command->opcode),
+ le16_to_cpu(p_command->opcode_b),
+ le16_to_cpu(p_command->length_dw),
+ le32_to_cpu(p_command->src_addr_hi),
+ le32_to_cpu(p_command->src_addr_lo),
+ le32_to_cpu(p_command->dst_addr_hi),
+ le32_to_cpu(p_command->dst_addr_lo));
+
+ /* Copy the command to DMAE - need to do it before every call
+ * for source/dest address no reset.
+ * The first 9 DWs are the command registers, the 10th DW is the
+ * GO register, and the rest are result registers
+ * (which are read only by the client).
+ */
+ for (i = 0; i < DMAE_CMD_SIZE; i++) {
+ u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
+ *(((u32 *)p_command) + i) : 0;
+
+ qed_wr(p_hwfn, p_ptt,
+ DMAE_REG_CMD_MEM +
+ (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
+ (i * sizeof(u32)), data);
+ }
+
+ qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);
+
+ return qed_status;
+}
+
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
+ struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
+ u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
+ u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
+
+ *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32), p_addr, GFP_KERNEL);
+ if (!*p_comp)
+ goto err;
+
+ p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_addr, GFP_KERNEL);
+ if (!*p_cmd)
+ goto err;
+
+ p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_addr, GFP_KERNEL);
+ if (!*p_buff)
+ goto err;
+
+ p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
+
+ return 0;
+err:
+ qed_dmae_info_free(p_hwfn);
+ return -ENOMEM;
+}
+
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
+{
+ dma_addr_t p_phys;
+
+ /* Just make sure no one is in the middle */
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ if (p_hwfn->dmae_info.p_completion_word) {
+ p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32),
+ p_hwfn->dmae_info.p_completion_word, p_phys);
+ p_hwfn->dmae_info.p_completion_word = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_dmae_cmd) {
+ p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct dmae_cmd),
+ p_hwfn->dmae_info.p_dmae_cmd, p_phys);
+ p_hwfn->dmae_info.p_dmae_cmd = NULL;
+ }
+
+ if (p_hwfn->dmae_info.p_intermediate_buffer) {
+ p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(u32) * DMAE_MAX_RW_SIZE,
+ p_hwfn->dmae_info.p_intermediate_buffer,
+ p_phys);
+ p_hwfn->dmae_info.p_intermediate_buffer = NULL;
+ }
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+}
+
+static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
+{
+ u32 wait_cnt_limit = 10000, wait_cnt = 0;
+ int qed_status = 0;
+
+ barrier();
+ while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
+ udelay(DMAE_MIN_WAIT_TIME);
+ if (++wait_cnt > wait_cnt_limit) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
+ *p_hwfn->dmae_info.p_completion_word,
+ DMAE_COMPLETION_VAL);
+ qed_status = -EBUSY;
+ break;
+ }
+
+ /* to sync the completion_word since we are not
+ * using the volatile keyword for p_completion_word
+ */
+ barrier();
+ }
+
+ if (qed_status == 0)
+ *p_hwfn->dmae_info.p_completion_word = 0;
+
+ return qed_status;
+}
+
+static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr,
+ u64 dst_addr,
+ u8 src_type,
+ u8 dst_type,
+ u32 length_dw)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ int qed_status = 0;
+
+ switch (src_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
+ break;
+ /* for virtual source addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
+ (void *)(uintptr_t)src_addr,
+ length_dw * sizeof(u32));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dst_type) {
+ case QED_DMAE_ADDRESS_GRC:
+ case QED_DMAE_ADDRESS_HOST_PHYS:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
+ break;
+ /* for virtual destination addresses we use the intermediate buffer. */
+ case QED_DMAE_ADDRESS_HOST_VIRT:
+ cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ cmd->length_dw = cpu_to_le16((u16)length_dw);
+
+ qed_dmae_post_command(p_hwfn, p_ptt);
+
+ qed_status = qed_dmae_operation_wait(p_hwfn);
+
+ if (qed_status) {
+ DP_NOTICE(p_hwfn,
+ "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
+ src_addr, dst_addr, length_dw);
+ return qed_status;
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
+ memcpy((void *)(uintptr_t)(dst_addr),
+ &p_hwfn->dmae_info.p_intermediate_buffer[0],
+ length_dw * sizeof(u32));
+
+ return 0;
+}
+
+static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 src_addr, u64 dst_addr,
+ u8 src_type, u8 dst_type,
+ u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
+ u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
+ struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
+ u64 src_addr_split = 0, dst_addr_split = 0;
+ u16 length_limit = DMAE_MAX_RW_SIZE;
+ int qed_status = 0;
+ u32 offset = 0;
+
+ if (p_hwfn->cdev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_HW,
+ "Recovery is in progress. Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n",
+ src_addr, src_type, dst_addr, dst_type,
+ size_in_dwords);
+
+ /* Let the flow complete w/o any error handling */
+ return 0;
+ }
+
+ qed_dmae_opcode(p_hwfn,
+ (src_type == QED_DMAE_ADDRESS_GRC),
+ (dst_type == QED_DMAE_ADDRESS_GRC),
+ p_params);
+
+ cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
+ cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
+ cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);
+
+ /* Check that the grc_addr is valid, i.e. < MAX_GRC_OFFSET */
+ cnt_split = size_in_dwords / length_limit;
+ length_mod = size_in_dwords % length_limit;
+
+ src_addr_split = src_addr;
+ dst_addr_split = dst_addr;
+
+ for (i = 0; i <= cnt_split; i++) {
+ offset = length_limit * i;
+
+ if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) {
+ if (src_type == QED_DMAE_ADDRESS_GRC)
+ src_addr_split = src_addr + offset;
+ else
+ src_addr_split = src_addr + (offset * 4);
+ }
+
+ if (dst_type == QED_DMAE_ADDRESS_GRC)
+ dst_addr_split = dst_addr + offset;
+ else
+ dst_addr_split = dst_addr + (offset * 4);
+
+ length_cur = (cnt_split == i) ? length_mod : length_limit;
+
+ /* might be zero on last iteration */
+ if (!length_cur)
+ continue;
+
+ qed_status = qed_dmae_execute_sub_operation(p_hwfn,
+ p_ptt,
+ src_addr_split,
+ dst_addr_split,
+ src_type,
+ dst_type,
+ length_cur);
+ if (qed_status) {
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
+ qed_status, src_addr,
+ dst_addr, length_cur);
+ break;
+ }
+ }
+
+ return qed_status;
+}
+
+int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u64 source_addr, u32 grc_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ int rc;
+
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ grc_addr_in_dw,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ QED_DMAE_ADDRESS_GRC,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords,
+ struct qed_dmae_params *p_params)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ int rc;
+
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, QED_DMAE_ADDRESS_GRC,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+ int rc;
+
+ mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+ return rc;
+}
+
+void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type, const char *fmt, ...)
+{
+ char buf[QED_HW_ERR_MAX_STR_SIZE];
+ va_list vl;
+ int len;
+
+ if (fmt) {
+ va_start(vl, fmt);
+ len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (len > QED_HW_ERR_MAX_STR_SIZE - 1)
+ len = QED_HW_ERR_MAX_STR_SIZE - 1;
+
+ DP_NOTICE(p_hwfn, "%s", buf);
+ }
+
+ /* Fan failure cannot be masked by handling of another HW error */
+ if (p_hwfn->cdev->recov_in_prog &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_DRV,
+ "Recovery is in progress. Avoid notifying about HW error %d.\n",
+ err_type);
+ return;
+ }
+
+ qed_hw_error_occurred(p_hwfn, err_type);
+
+ if (fmt)
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len);
+}
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase)
+{
+ u32 size = PAGE_SIZE / 2, val;
+ int rc = 0;
+ dma_addr_t p_phys;
+ void *p_virt;
+ u32 *p_tmp;
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ 2 * size, &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: failed to allocate memory\n",
+ phase);
+ return -ENOMEM;
+ }
+
+ /* Fill the bottom half of the allocated memory with a known pattern */
+ for (p_tmp = (u32 *)p_virt;
+ p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+ /* Save the address itself as the value */
+ val = (u32)(uintptr_t)p_tmp;
+ *p_tmp = val;
+ }
+
+ /* Zero the top half of the allocated memory */
+ memset((u8 *)p_virt + size, 0, size);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+ phase,
+ (u64)p_phys,
+ p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+ rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+ size / 4, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+ phase, rc);
+ goto out;
+ }
+
+ /* Verify that the top half of the allocated memory has the pattern */
+ for (p_tmp = (u32 *)((u8 *)p_virt + size);
+ p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+ /* The corresponding address in the bottom half */
+ val = (u32)(uintptr_t)p_tmp - size;
+
+ if (*p_tmp != val) {
+ DP_NOTICE(p_hwfn,
+ "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+ phase,
+ (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+ p_tmp, *p_tmp, val);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+ return rc;
+}
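
As a usage sketch for the PTT window API implemented above (acquire a window, access registers through it, release it); REG_EXAMPLE_OFFSET is a hypothetical GRC offset, not a real qed register.

/* Sketch only: typical acquire/use/release flow for a PTT window. */
static int example_ptt_read_modify_write(struct qed_hwfn *p_hwfn)
{
	struct qed_ptt *p_ptt;
	u32 val;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;	/* pool exhausted or acquire timed out */

	val = qed_rd(p_hwfn, p_ptt, REG_EXAMPLE_OFFSET);
	qed_wr(p_hwfn, p_ptt, REG_EXAMPLE_OFFSET, val | 0x1);

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;
}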
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
new file mode 100644
index 000000000..e535983ce
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -0,0 +1,336 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_HW_H
+#define _QED_HW_H
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+
+/* Forward declaration */
+struct qed_ptt;
+
+enum reserved_ptts {
+ RESERVED_PTT_EDIAG,
+ RESERVED_PTT_USER_SPACE,
+ RESERVED_PTT_MAIN,
+ RESERVED_PTT_DPC,
+ RESERVED_PTT_MAX
+};
+
+enum _dmae_cmd_dst_mask {
+ DMAE_CMD_DST_MASK_NONE = 0,
+ DMAE_CMD_DST_MASK_PCIE = 1,
+ DMAE_CMD_DST_MASK_GRC = 2
+};
+
+enum _dmae_cmd_src_mask {
+ DMAE_CMD_SRC_MASK_PCIE = 0,
+ DMAE_CMD_SRC_MASK_GRC = 1
+};
+
+enum _dmae_cmd_crc_mask {
+ DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0,
+ DMAE_CMD_COMP_CRC_EN_MASK_SET = 1
+};
+
+/* definitions for DMA constants */
+#define DMAE_GO_VALUE 0x1
+
+#define DMAE_COMPLETION_VAL 0xD1AE
+#define DMAE_CMD_ENDIANITY 0x2
+
+#define DMAE_CMD_SIZE 14
+#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5)
+#define DMAE_MIN_WAIT_TIME 0x2
+#define DMAE_MAX_CLIENTS 32
+
+/**
+ * qed_gtt_init(): Initialize GTT windows.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: 0 on success, negative value on error.
+ */
+int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_ptt_pool_free(): Free PTT pool.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt
+ *
+ * Return: u32.
+ */
+u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_ptt_get_bar_addr(): Get PTT's external BAR address.
+ *
+ * @p_ptt: P_ptt
+ *
+ * Return: u32.
+ */
+u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
+
+/**
+ * qed_ptt_set_win(): Set PTT Window's GRC BAR address
+ *
+ * @p_hwfn: HW device data.
+ * @new_hw_addr: New HW address.
+ * @p_ptt: P_Ptt
+ *
+ * Return: Void.
+ */
+void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 new_hw_addr);
+
+/**
+ * qed_get_reserved_ptt(): Get a specific reserved PTT.
+ *
+ * @p_hwfn: HW device data.
+ * @ptt_idx: Ptt Index.
+ *
+ * Return: struct qed_ptt *.
+ */
+struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
+ enum reserved_ptts ptt_idx);
+
+/**
+ * qed_wr(): Write value to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @val: Val.
+ * @hw_addr: HW address
+ *
+ * Return: Void.
+ */
+void qed_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ u32 val);
+
+/**
+ * qed_rd(): Read value from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address
+ *
+ * Return: u32 value read.
+ */
+u32 qed_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr);
+
+/**
+ * qed_memcpy_from(): Copy n bytes from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @dest: Destination.
+ * @hw_addr: HW address.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
+ */
+void qed_memcpy_from(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *dest,
+ u32 hw_addr,
+ size_t n);
+
+/**
+ * qed_memcpy_to(): Copy n bytes to BAR using the given ptt
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address.
+ * @src: Source.
+ * @n: Number of bytes to copy.
+ *
+ * Return: Void.
+ */
+void qed_memcpy_to(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 hw_addr,
+ void *src,
+ size_t n);
+/**
+ * qed_fid_pretend(): Pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @fid: fid field of pxp_pretend structure. Can contain
+ * either pf / vf; port/path fields are don't-care.
+ *
+ * Return: Void.
+ */
+void qed_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 fid);
+
+/**
+ * qed_port_pretend(): Pretend to another port when accessing the ptt window
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to
+ *
+ * Return: Void.
+ */
+void qed_port_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 port_id);
+
+/**
+ * qed_port_unpretend(): Cancel any previously set port pretend
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ */
+void qed_port_unpretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_port_fid_pretend(): Pretend to another port and another function
+ * when accessing the ptt window
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to
+ * @fid: fid field of pxp_pretend structure. Can contain either pf / vf.
+ *
+ * Return: Void.
+ */
+void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 port_id, u16 fid);
+
+/**
+ * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID
+ *
+ * @p_hwfn: HW device data.
+ * @vfid: VFID.
+ *
+ * Return: u32 concrete FID.
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
+ * qed_dmae_idx_to_go_cmd(): Map the idx to the corresponding DMAE 'go'
+ * command register address. Declared here since
+ * other files require it.
+ *
+ * @idx: Index
+ *
+ * Return: u32 GO register address.
+ */
+u32 qed_dmae_idx_to_go_cmd(u8 idx);
+
+/**
+ * qed_dmae_info_alloc(): Init the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_dmae_info_free(): Free the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
+
+union qed_qm_pq_params {
+ struct {
+ u8 q_idx;
+ } iscsi;
+
+ struct {
+ u8 tc;
+ } core;
+
+ struct {
+ u8 is_vf;
+ u8 vf_id;
+ u8 tc;
+ } eth;
+
+ struct {
+ u8 dcqcn;
+ u8 qpid; /* roce relative */
+ } roce;
+};
+
+int qed_init_fw_data(struct qed_dev *cdev,
+ const u8 *fw_data);
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const char *phase);
+
+#define QED_HW_ERR_MAX_STR_SIZE 256
+
+/**
+ * qed_hw_err_notify(): Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @err_type: Err Type.
+ * @fmt: Debug data buffer to send to the MFW
+ * @...: buffer format args
+ *
+ * Return: Void.
+ */
+void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_hw_err_type err_type,
+ const char *fmt, ...);
+#endif
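
Two short sketches of the APIs declared in this header, both hedged: REG_EXAMPLE_OFFSET and GRC_EXAMPLE_ADDR are placeholder addresses, and default DMAE parameters are requested by passing NULL, as qed_dmae_sanity() does.

/* Sketch only: read a register of another port by pretending to it and
 * then cancelling the pretend.
 */
static u32 example_read_other_port(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt, u8 other_port)
{
	u32 val;

	qed_port_pretend(p_hwfn, p_ptt, other_port);
	val = qed_rd(p_hwfn, p_ptt, REG_EXAMPLE_OFFSET);
	qed_port_unpretend(p_hwfn, p_ptt);

	return val;
}

/* Sketch only: DMAE copy of a host (virtual) buffer into GRC space.
 * GRC_EXAMPLE_ADDR is a placeholder GRC byte address; size is in dwords.
 */
static int example_dmae_host2grc(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *host_buf, u32 size_in_dwords)
{
	return qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)host_buf,
				 GRC_EXAMPLE_ADDR, size_in_dwords, NULL);
}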
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
new file mode 100644
index 000000000..407029a36
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -0,0 +1,1932 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/crc8.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
+#include "qed_reg_addr.h"
+
+#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
+
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
+ {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
+ {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
+ {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
+};
+
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
+ {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
+};
+
+/* General constants */
+#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
+ QM_PQ_ELEMENT_SIZE, \
+ 0x1000) : 0)
+#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
+ 0x100) - 1 : 0)
+#define QM_INVALID_PQ_ID 0xffff
+
+/* Max link speed (in Mbps) */
+#define QM_MAX_LINK_SPEED 100000
+
+/* Feature enable */
+#define QM_BYPASS_EN 1
+#define QM_BYTE_CRD_EN 1
+
+/* Initial VOQ byte credit */
+#define QM_INITIAL_VOQ_BYTE_CRD 98304
+/* Other PQ constants */
+#define QM_OTHER_PQS_PER_PF 4
+
+/* VOQ constants */
+#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1)
+
+/* WFQ constants */
+
+/* PF WFQ increment value, 0x9000 = 4*9*1024 */
+#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_WFQ_UPPER_BOUND 62500000
+
+/* PF WFQ max increment value, 0.7 * upper bound */
+#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)
+
+/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
+#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16
+
+/* VP WFQ increment value */
+#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL)
+
+/* VP WFQ min increment value */
+#define QM_VP_WFQ_MIN_INC_VAL 10800
+
+/* VP WFQ max increment value, 2^30 */
+#define QM_VP_WFQ_MAX_INC_VAL 0x40000000
+
+/* VP WFQ bypass threshold */
+#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100)
+
+/* VP RL credit task cost */
+#define QM_VP_RL_CRD_TASK_COST 9700
+
+/* Bit of VOQ in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_PF_SHIFT 5
+
+/* RL constants */
+
+/* Period in us */
+#define QM_RL_PERIOD 5
+
+/* Period in 25MHz cycles */
+#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+
+/* RL increment value - rate is specified in mbps */
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : \
+ 100000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1); })
+
+/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_RL_UPPER_BOUND 62500000
+
+/* Max PF RL increment value is 0.7 * upper bound */
+#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
+
+/* QCN RL Upper bound, speed is in Mbps */
+#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \
+ u32, \
+ (u32)(((speed) * \
+ QM_RL_PERIOD * 101) / (8 * 100)), \
+ QM_VP_RL_CRD_TASK_COST \
+ + 1000))
+
+/* AFullOprtnstcCrdMask constants */
+#define QM_OPPOR_LINE_VOQ_DEF 1
+#define QM_OPPOR_FW_STOP_DEF 0
+#define QM_OPPOR_PQ_EMPTY_DEF 1
+
+/* Command Queue constants */
+
+/* Pure LB CmdQ lines (+spare) */
+#define PBF_CMDQ_PURE_LB_LINES 150
+
+#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
+ (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
+ PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
+
+#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
+ (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
+ (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
+ PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
+
+/* Returns the VOQ line credit for the specified number of PBF command lines.
+ * PBF lines are specified in 256b units.
+ */
+#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
+ ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
+
+/* BTB: blocks constants (block size = 256B) */
+
+/* 256B blocks in 9700B packet */
+#define BTB_JUMBO_PKT_BLOCKS 38
+
+/* Headroom per-port */
+#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
+#define BTB_PURE_LB_FACTOR 10
+
+/* Factored (hence really 0.7) */
+#define BTB_PURE_LB_RATIO 7
+
+/* QM stop command constants */
+#define QM_STOP_PQ_MASK_WIDTH 32
+#define QM_STOP_CMD_ADDR 2
+#define QM_STOP_CMD_STRUCT_SIZE 2
+#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
+#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
+#define QM_STOP_CMD_PAUSE_MASK_MASK -1
+#define QM_STOP_CMD_GROUP_ID_OFFSET 1
+#define QM_STOP_CMD_GROUP_ID_SHIFT 16
+#define QM_STOP_CMD_GROUP_ID_MASK 15
+#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
+#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
+#define QM_STOP_CMD_PQ_TYPE_MASK 1
+#define QM_STOP_CMD_MAX_POLL_COUNT 100
+#define QM_STOP_CMD_POLL_PERIOD_US 500
+
+/* QM command macros */
+#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
+#define QM_CMD_SET_FIELD(var, cmd, field, value) \
+ SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
+ cmd ## _ ## field, \
+ value)
+
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
+ rl_id, ext_voq, wrr) \
+ do { \
+ u32 __reg = 0; \
+ \
+ BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
+ memset(&(map), 0, sizeof(map)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
+ !!(rl_valid)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
+ (wrr)); \
+ \
+ STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
+ __reg); \
+ (map).reg = cpu_to_le32(__reg); \
+ } while (0)
+
+#define WRITE_PQ_INFO_TO_RAM 1
+#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
+ (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \
+ ((rl_valid ? 1 : 0) << 22) | (((rl) & 255) << 24) | \
+ (((rl) >> 8) << 9))
+
+#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id))
+
+static const char * const s_protocol_types[] = {
+ "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE",
+ "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP",
+ "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON",
+ "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI",
+};
+
+static const char *s_ramrod_cmd_ids[][28] = {
+ {
+ "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC",
+ "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC",
+ "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "ISCSI_RAMROD_CMD_ID_UPDATE_CONN",
+ "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN",
+ "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE",
+ "ISCSI_RAMROD_CMD_ID_CONN_STATS", },
+ { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC",
+ "FCOE_RAMROD_CMD_ID_STAT_FUNC",
+ "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN",
+ "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP",
+ "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP",
+ "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP",
+ "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE",
+ "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP",
+ "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP",
+ "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", },
+ { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START",
+ "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP",
+ "CORE_RAMROD_TX_QUEUE_STOP",
+ "CORE_RAMROD_RX_QUEUE_FLUSH",
+ "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", },
+ { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START",
+ "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP",
+ "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP",
+ "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP",
+ "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE",
+ "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION",
+ "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER",
+ "ETH_RAMROD_RX_ADD_UDP_FILTER",
+ "ETH_RAMROD_RX_DELETE_UDP_FILTER",
+ "ETH_RAMROD_RX_CREATE_GFT_ACTION",
+ "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE",
+ "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL",
+ "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL",
+ "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", },
+ { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT",
+ "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR",
+ "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ",
+ "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ",
+ "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ",
+ "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING",
+ "RDMA_RAMROD_STOP_NS_TRACKING",
+ "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD",
+ "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR",
+ "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP",
+ "IWARP_RAMROD_CMD_ID_MODIFY_QP",
+ "IWARP_RAMROD_CMD_ID_DESTROY_QP",
+ "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", },
+ { NULL }, /*TOE*/
+ { NULL }, /*PREROCE*/
+ { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START",
+ "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START",
+ "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE",
+ "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", }
+};
+
+/******************** INTERNAL IMPLEMENTATION *********************/
+
+/* Returns the external VOQ number */
+static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn,
+ u8 port_id, u8 tc, u8 max_phys_tcs_per_port)
+{
+ if (tc == PURE_LB_TC)
+ return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id;
+ else
+ return port_id * max_phys_tcs_per_port + tc;
+}
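/* Worked example of the mapping above (illustrative values only): with
 * max_phys_tcs_per_port = 4, port_id = 1 and tc = 2 give external
 * VOQ 1 * 4 + 2 = 6, while PURE_LB_TC of any port maps into the separate
 * range that starts at NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB.
 */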
+
+/* Prepare PF RL enable/disable runtime init values */
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
+ if (pf_rl_en) {
+ u8 num_ext_voqs = MAX_NUM_VOQS;
+ u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
+
+ /* Enable RLs for all VOQs */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFVOQENABLE_RT_OFFSET,
+ (u32)voq_bit_mask);
+
+ /* Write RL period */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFPERIODTIMER_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+
+ /* Set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
+ QM_PF_RL_UPPER_BOUND);
+ }
+}
+
+/* Prepare PF WFQ enable/disable runtime init values */
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
+
+ /* Set credit threshold for QM bypass flow */
+ if (pf_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
+ QM_PF_WFQ_UPPER_BOUND);
+}
+
+/* Prepare global RL enable/disable runtime init values */
+static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
+ global_rl_en ? 1 : 0);
+ if (global_rl_en) {
+ /* Write RL period (use timer 0 only) */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
+ QM_RL_PERIOD_CLK_25M);
+
+ /* Set credit threshold for QM bypass flow */
+ if (QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
+ QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
+ }
+}
+
+/* Prepare VPORT WFQ enable/disable runtime init values */
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
+{
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
+ vport_wfq_en ? 1 : 0);
+
+ /* Set credit threshold for QM bypass flow */
+ if (vport_wfq_en && QM_BYPASS_EN)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
+ QM_VP_WFQ_BYPASS_THRESH);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines for
+ * the specified VOQ.
+ */
+static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
+ u8 ext_voq, u16 cmdq_lines)
+{
+ u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
+
+ OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
+ (u32)cmdq_lines);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+ STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
+ qm_line_crd);
+}
+
+/* Prepare runtime init values to allocate PBF command queue lines. */
+static void
+qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
+
+ /* Clear PBF lines of all VOQs */
+ for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
+ STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ u16 phys_lines, phys_lines_per_tc;
+
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Find number of command queue lines to divide between the
+ * active physical TCs.
+ */
+ phys_lines = port_params[port_id].num_pbf_cmd_lines;
+ phys_lines -= PBF_CMDQ_PURE_LB_LINES;
+
+ /* Find #lines per active physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++)
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
+
+ /* Init registers per active TC */
+ for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc, max_phys_tcs_per_port);
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ qed_cmdq_lines_voq_rt_init(p_hwfn,
+ ext_voq,
+ phys_lines_per_tc);
+ }
+
+ /* Init registers for pure LB TC */
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
+ PBF_CMDQ_PURE_LB_LINES);
+ }
+}
+
+/* Prepare runtime init values to allocate guaranteed BTB blocks for the
+ * specified port. The guaranteed BTB space is divided between the TCs as
+ * follows (shared space is currently not used):
+ * 1. Parameters:
+ * B - BTB blocks for this port
+ * C - Number of physical TCs for this port
+ * 2. Calculation:
+ * a. 38 blocks (9700B jumbo frame) are allocated for global per port
+ * headroom.
+ * b. B = B - 38 (remainder after global headroom allocation).
+ * c. MAX(38, B/(C+0.7)) blocks are allocated for the pure LB VOQ.
+ * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation).
+ * e. B/C blocks are allocated for each physical TC.
+ * Assumptions:
+ * - MTU is up to 9700 bytes (38 blocks)
+ * - All TCs are considered symmetrical (same rate and packet size)
+ * - No optimization for lossy TC (all are considered lossless). Shared space
+ * is not enabled and allocated for each TC.
+ */
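+/* Example (illustrative numbers only): with B = 1000 blocks and C = 4,
+ * the 38-block headroom leaves B = 962; the pure LB VOQ gets
+ * MAX(38, 962 / 4.7) ~= 204 blocks, and each physical TC gets
+ * (962 - 204) / 4 = 189 blocks (integer arithmetic rounds down).
+ */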
+static void
+qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
+{
+ u32 usable_blocks, pure_lb_blocks, phys_blocks;
+ u8 tc, ext_voq, port_id, num_tcs_in_port;
+
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ if (!port_params[port_id].active)
+ continue;
+
+ /* Subtract headroom blocks */
+ usable_blocks = port_params[port_id].num_btb_blocks -
+ BTB_HEADROOM_BLOCKS;
+
+ /* Find blocks per physical TC. Use a scaling factor to avoid
+ * floating-point arithmetic.
+ */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+
+ pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
+ BTB_PURE_LB_RATIO);
+ pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
+ pure_lb_blocks / BTB_PURE_LB_FACTOR);
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
+
+ /* Init physical TCs */
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1) {
+ ext_voq =
+ qed_get_ext_voq(p_hwfn,
+ port_id,
+ tc,
+ max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn,
+ PBF_BTB_GUARANTEED_RT_OFFSET
+ (ext_voq), phys_blocks);
+ }
+ }
+
+ /* Init pure LB TC */
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ port_id,
+ PURE_LB_TC, max_phys_tcs_per_port);
+ STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
+ pure_lb_blocks);
+ }
+}
+
+/* Prepare runtime init values for the specified RL.
+ * Set max link speed (100Gbps) per rate limiter.
+ * Always returns 0.
+ */
+static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ (u32)QM_RL_CRD_REG_SIGN_BIT;
+ u32 inc_val;
+ u16 rl_id;
+
+ /* Go over all global RLs */
+ for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) {
+ inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED);
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val);
+ }
+
+ return 0;
+}
+
+/* Returns the upper bound for the specified Vport RL parameters.
+ * link_speed is in Mbps.
+ * Returns 0 in case of error.
+ */
+static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
+ u32 link_speed)
+{
+ switch (vport_rl_type) {
+ case QM_RL_TYPE_NORMAL:
+ return QM_INITIAL_VOQ_BYTE_CRD;
+ case QM_RL_TYPE_QCN:
+ return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
+ default:
+ return 0;
+ }
+}
+
+/* Prepare VPORT RL runtime init values.
+ * Return -1 on error.
+ */
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u16 start_rl,
+ u16 num_rls,
+ u32 link_speed,
+ struct init_qm_rl_params *rl_params)
+{
+ u16 i, rl_id;
+
+ if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
+ u32 upper_bound, inc_val;
+
+ upper_bound =
+ qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
+ rl_params[i].vport_rl_type,
+ link_speed);
+
+ inc_val =
+ QM_RL_INC_VAL(rl_params[i].vport_rl ?
+ rl_params[i].vport_rl : link_speed);
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn,
+ "Invalid RL rate - limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
+/* Prepare Tx PQ mapping runtime init values for the specified PF */
+static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
+{
+ u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
+ u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
+
+ num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+
+ first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE;
+ last_pq_group = (p_params->start_pq + num_pqs - 1) /
+ QM_PF_QUEUE_GROUP_SIZE;
+
+ pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids);
+ vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Set mapping from PQ group to PF */
+ for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
+ STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
+ (u32)(p_params->pf_id));
+
+ /* Set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_pf_cids));
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
+ QM_PQ_SIZE_256B(p_params->num_vf_cids));
+
+ /* Go over all Tx PQs */
+ for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
+ u16 *p_first_tx_pq_id, vport_id_in_pf;
+ struct qm_rf_pq_map tx_pq_map;
+ u8 tc_id = pq_params[i].tc_id;
+ bool is_vf_pq;
+ u8 ext_voq;
+
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ pq_params[i].port_id,
+ tc_id,
+ p_params->max_phys_tcs_per_port);
+ is_vf_pq = (i >= p_params->num_pf_pqs);
+
+ /* Update first Tx PQ of VPORT/TC */
+ vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport;
+ p_first_tx_pq_id =
+ &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
+ if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
+ u32 map_val =
+ (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);
+
+ /* Create new VP PQ */
+ *p_first_tx_pq_id = pq_id;
+
+ /* Map VP PQ to VOQ and PF */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQVPMAP_RT_OFFSET +
+ *p_first_tx_pq_id,
+ map_val);
+ }
+
+ /* Prepare PQ map entry */
+ QM_INIT_TX_PQ_MAP(p_hwfn,
+ tx_pq_map,
+ pq_id,
+ *p_first_tx_pq_id,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id,
+ ext_voq, pq_params[i].wrr_group);
+
+ /* Set PQ base address */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+
+ /* Clear PQ pointer table entry (64 bit) */
+ if (p_params->is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLTX_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ /* Write PQ info to RAM */
+ if (WRITE_PQ_INFO_TO_RAM != 0) {
+ u32 pq_info = 0;
+
+ pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id,
+ p_params->pf_id,
+ tc_id,
+ pq_params[i].port_id,
+ pq_params[i].rl_valid,
+ pq_params[i].rl_id);
+ qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
+ pq_info);
+ }
+
+ /* If VF PQ, add indication to PQ VF mask */
+ if (is_vf_pq) {
+ tx_pq_vf_mask[pq_id /
+ QM_PF_QUEUE_GROUP_SIZE] |=
+ BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE));
+ mem_addr_4kb += vport_pq_mem_4kb;
+ } else {
+ mem_addr_4kb += pq_mem_4kb;
+ }
+ }
+
+ /* Store Tx PQ VF mask to size select register */
+ for (i = 0; i < num_tx_pq_vf_masks; i++)
+ if (tx_pq_vf_mask[i])
+ STORE_RT_REG(p_hwfn,
+ QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
+ tx_pq_vf_mask[i]);
+
+ return 0;
+}
+
+/* Prepare Other PQ mapping runtime init values for the specified PF */
+static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ u8 pf_id,
+ bool is_pf_loading,
+ u32 num_pf_cids,
+ u32 num_tids, u32 base_mem_addr_4kb)
+{
+ u32 pq_size, pq_mem_4kb, mem_addr_4kb;
+ u16 i, j, pq_id, pq_group;
+
+ /* A single other PQ group is used in each PF, where PQ group i is used
+ * in PF i.
+ */
+ pq_group = pf_id;
+ pq_size = num_pf_cids + num_tids;
+ pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
+ mem_addr_4kb = base_mem_addr_4kb;
+
+ /* Map PQ group to PF */
+ STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
+ (u32)(pf_id));
+
+ /* Set PQ sizes */
+ STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
+ QM_PQ_SIZE_256B(pq_size));
+
+ for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
+ i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
+ /* Set PQ base address */
+ STORE_RT_REG(p_hwfn,
+ QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
+ mem_addr_4kb);
+
+ /* Clear PQ pointer table entry */
+ if (is_pf_loading)
+ for (j = 0; j < 2; j++)
+ STORE_RT_REG(p_hwfn,
+ QM_REG_PTRTBLOTHER_RT_OFFSET +
+ (pq_id * 2) + j, 0);
+
+ mem_addr_4kb += pq_mem_4kb;
+ }
+}
+
+/* Prepare PF WFQ runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
+ struct init_qm_pq_params *pq_params = p_params->pq_params;
+ u32 inc_val, crd_reg_offset;
+ u8 ext_voq;
+ u16 i;
+
+ inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
+ return -1;
+ }
+
+ for (i = 0; i < num_tx_pqs; i++) {
+ ext_voq = qed_get_ext_voq(p_hwfn,
+ pq_params[i].port_id,
+ pq_params[i].tc_id,
+ p_params->max_phys_tcs_per_port);
+ crd_reg_offset =
+ (p_params->pf_id < MAX_NUM_PFS_BB ?
+ QM_REG_WFQPFCRD_RT_OFFSET :
+ QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
+ ext_voq * MAX_NUM_PFS_BB +
+ (p_params->pf_id % MAX_NUM_PFS_BB);
+ OVERWRITE_RT_REG(p_hwfn,
+ crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ }
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+
+ return 0;
+}
+
+/* Prepare PF RL runtime init values for the specified PF.
+ * Return -1 on error.
+ */
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFCRD_RT_OFFSET + pf_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
+ QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
+
+ return 0;
+}
+
+/* Prepare VPORT WFQ runtime init values for the specified VPORTs.
+ * Return -1 on error.
+ */
+static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
+ u16 num_vports,
+ struct init_qm_vport_params *vport_params)
+{
+ u16 vport_pq_id, wfq, i;
+ u32 inc_val;
+ u8 tc;
+
+ /* Go over all PF VPORTs */
+ for (i = 0; i < num_vports; i++) {
+ /* Each VPORT can have several VPORT PQ IDs for various TCs */
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ /* Check if VPORT/TC is valid */
+ vport_pq_id = vport_params[i].first_tx_pq_id[tc];
+ if (vport_pq_id == QM_INVALID_PQ_ID)
+ continue;
+
+ /* Find WFQ weight (per VPORT or per VPORT+TC) */
+ wfq = vport_params[i].wfq;
+ wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
+ }
+
+ /* Config registers */
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
+ vport_pq_id,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
+ }
+ }
+
+ return 0;
+}
+
+static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 reg_val, i;
+
+ for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
+ i++) {
+ udelay(QM_STOP_CMD_POLL_PERIOD_US);
+ reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
+ }
+
+ /* Check for a timeout while waiting for SDM command ready */
+ if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
+ "Timeout when waiting for QM SDM command ready signal\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
+{
+ if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
+ return false;
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
+ qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
+
+ return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
+}
+
+/******************** INTERFACE IMPLEMENTATION *********************/
+
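+/* Returns the QM PQ memory size required by a PF: PF Tx PQs + VF Tx PQs
+ * + the PF's 'other' PQs (in the 4KB units used by QM_PQ_MEM_4KB).
+ */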
+u32 qed_qm_pf_mem_size(u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
+{
+ return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
+ QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
+ QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
+}
+
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params)
+{
+ u32 mask = 0;
+
+ /* Init AFullOprtnstcCrdMask */
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
+ QM_OPPOR_LINE_VOQ_DEF);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
+ p_params->pf_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
+ p_params->vport_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
+ p_params->pf_rl_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
+ p_params->global_rl_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
+ SET_FIELD(mask,
+ QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
+ STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
+
+ /* Enable/disable PF RL */
+ qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en);
+
+ /* Enable/disable PF WFQ */
+ qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en);
+
+ /* Enable/disable global RL */
+ qed_enable_global_rl(p_hwfn, p_params->global_rl_en);
+
+ /* Enable/disable VPORT WFQ */
+ qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en);
+
+ /* Init PBF CMDQ line credit */
+ qed_cmdq_lines_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+
+ /* Init BTB blocks in PBF */
+ qed_btb_blocks_rt_init(p_hwfn,
+ p_params->max_ports_per_engine,
+ p_params->max_phys_tcs_per_port,
+ p_params->port_params);
+
+ qed_global_rl_rt_init(p_hwfn);
+
+ return 0;
+}
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params)
+{
+ struct init_qm_vport_params *vport_params = p_params->vport_params;
+ u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids +
+ p_params->num_tids) *
+ QM_OTHER_PQS_PER_PF;
+ u16 i;
+ u8 tc;
+
+ /* Clear first Tx PQ ID array for each VPORT */
+ for (i = 0; i < p_params->num_vports; i++)
+ for (tc = 0; tc < NUM_OF_TCS; tc++)
+ vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
+
+ /* Map Other PQs (if any) */
+ qed_other_pq_map_rt_init(p_hwfn,
+ p_params->pf_id,
+ p_params->is_pf_loading, p_params->num_pf_cids,
+ p_params->num_tids, 0);
+
+ /* Map Tx PQs */
+ if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
+ return -1;
+
+ /* Init PF WFQ */
+ if (p_params->pf_wfq)
+ if (qed_pf_wfq_rt_init(p_hwfn, p_params))
+ return -1;
+
+ /* Init PF RL */
+ if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl))
+ return -1;
+
+ /* Init VPORT WFQ */
+ if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
+ return -1;
+
+ /* Set VPORT RL */
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
+ p_params->num_rls, p_params->link_speed,
+ p_params->rl_params))
+ return -1;
+
+ return 0;
+}
+
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);
+
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
+{
+ u32 inc_val = QM_RL_INC_VAL(pf_rl);
+
+ if (inc_val > QM_PF_RL_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n");
+ return -1;
+ }
+
+ qed_wr(p_hwfn,
+ p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
+
+ return 0;
+}
+
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
+{
+ int result = 0;
+ u16 vport_pq_id;
+ u8 tc;
+
+ for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
+ vport_pq_id, wfq);
+ }
+
+ return result;
+}
+
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 wfq)
+{
+ u32 inc_val;
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID)
+ return -1;
+
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
+ inc_val);
+
+ return 0;
+}
+
+int qed_init_global_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type)
+{
+ u32 inc_val, upper_bound;
+
+ upper_bound =
+ (vport_rl_type ==
+ QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
+ QM_INITIAL_VOQ_BYTE_CRD;
+ inc_val = QM_RL_INC_VAL(rate_limit);
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
+
+ return 0;
+}
+
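+/* Send QM stop/release commands covering PQs [start_pq, start_pq + num_pqs).
+ * Commands are issued in chunks of QM_STOP_PQ_MASK_WIDTH PQs. Returns true
+ * on success, false if the QM SDM command interface timed out.
+ */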
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
+{
+ u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
+ u32 pq_mask = 0, last_pq, pq_id;
+
+ last_pq = start_pq + num_pqs - 1;
+
+ /* Set command's PQ type */
+ QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
+
+ /* Go over requested PQs */
+ for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
+ /* Set PQ bit in mask (stop command only) */
+ if (!is_release_cmd)
+ pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH));
+
+ /* If last PQ or end of PQ mask, write command */
+ if ((pq_id == last_pq) ||
+ (pq_id % QM_STOP_PQ_MASK_WIDTH ==
+ (QM_STOP_PQ_MASK_WIDTH - 1))) {
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD, PAUSE_MASK, pq_mask);
+ QM_CMD_SET_FIELD(cmd_arr,
+ QM_STOP_CMD,
+ GROUP_ID,
+ pq_id / QM_STOP_PQ_MASK_WIDTH);
+ if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR,
+ cmd_arr[0], cmd_arr[1]))
+ return false;
+ pq_mask = 0;
+ }
+ }
+
+ return true;
+}
+
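+/* Set or clear bit 'offset' in 'var' according to 'enable' */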
+#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
+ do { \
+ typeof(var) *__p_var = &(var); \
+ typeof(offset) __offset = offset; \
+ *__p_var = (*__p_var & ~BIT(__offset)) | \
+ ((enable) ? BIT(__offset) : 0); \
+ } while (0)
+
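+/* PRS output format values: with (TUNN) and without tunnel parsing blocks */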
+#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910
+#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910
+
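+/* Write 'arr_size' dwords from 'arr' to consecutive GRC addresses at 'addr' */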
+#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \
+ do { \
+ u32 i; \
+ \
+ for (i = 0; i < (arr_size); i++) \
+ qed_wr(dev, ptt, \
+ ((addr) + (4 * i)), \
+ ((u32 *)&(arr))[i]); \
+ } while (0)
+
+/**
+ * qed_dmae_to_grc() - Internal function for writing from host to
+ * wide-bus registers (split registers are not supported yet).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the registers.
+ * @p_data: Pointer to source data.
+ * @addr: Destination register address.
+ * @len_in_dwords: Data length in dwords (u32).
+ *
+ * Return: Length of the written data in dwords (u32) or -1 on invalid
+ * input.
+ */
+static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ __le32 *p_data, u32 addr, u32 len_in_dwords)
+{
+ struct qed_dmae_params params = { 0 };
+ u32 *data_cpu;
+ int rc;
+
+ if (!p_data)
+ return -1;
+
+ /* Set DMAE params */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1);
+
+ /* Execute DMAE command */
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)(p_data),
+ addr, len_in_dwords, &params);
+
+ /* If the DMAE write failed, fall back to writing via GRC */
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "Failed writing to chip using DMAE, using GRC instead\n");
+
+ /* Swap to CPU byteorder and write to registers using GRC */
+ data_cpu = (__force u32 *)p_data;
+ le32_to_cpu_array(data_cpu, len_in_dwords);
+
+ ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords);
+ cpu_to_le32_array(data_cpu, len_in_dwords);
+ }
+
+ return len_in_dwords;
+}
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+
+ /* Update NIG register */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+
+ /* Update PBF register */
+ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable)
+{
+ u32 reg_val;
+ u8 shift;
+
+ /* Update PRS register */
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ reg_val =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ register */
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ u32 reg_val;
+ u8 shift;
+
+ /* Update PRS register */
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
+ eth_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
+ ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ reg_val =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
+ shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+ SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn,
+ p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port)
+{
+ /* Update PRS register */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+
+ /* Update NIG register */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+
+ /* Update PBF register */
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable)
+{
+ u32 reg_val;
+
+ /* Update PRS register */
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
+ eth_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
+ ip_geneve_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val) {
+ reg_val =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
+
+ /* Update output only if tunnel blocks not included. */
+ if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ (u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
+ }
+
+ /* Update NIG register */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+ /* EDPM with geneve tunnel not supported in BB */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ return;
+
+ /* Update DORQ registers */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
+ ip_geneve_enable ? 1 : 0);
+}
+
+#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910
+
+void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool enable)
+{
+ u32 reg_val, cfg_mask;
+
+ /* read PRS config register */
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO);
+
+ /* set VXLAN_NO_L2_ENABLE mask */
+ cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET);
+
+ if (enable) {
+ /* set VXLAN_NO_L2_ENABLE flag */
+ reg_val |= cfg_mask;
+
+ /* update PRS FIC register */
+ qed_wr(p_hwfn,
+ p_ptt,
+ PRS_REG_OUTPUT_FORMAT_4_0,
+ (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
+ } else {
+ /* clear VXLAN_NO_L2_ENABLE flag */
+ reg_val &= ~cfg_mask;
+ }
+
+ /* write PRS config register */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val);
+}
+
+#define T_ETH_PACKET_ACTION_GFT_EVENTID 23
+#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272
+#define T_ETH_PACKET_MATCH_RFS_EVENTID 25
+#define PARSER_ETH_CONN_CM_HDR 0
+#define CAM_LINE_SIZE sizeof(u32)
+#define RAM_LINE_SIZE sizeof(u64)
+#define REG_SIZE sizeof(u32)
+
+void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
+{
+ struct regpair ram_line = { 0 };
+
+ /* Disable GFT search for the PF */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
+
+ /* Clean RAM & CAM for the next GFT session */
+
+ /* Zero CAM line */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
+
+ /* Zero RAM line */
+ qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
+}
+
+void qed_gft_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 pf_id,
+ bool tcp,
+ bool udp,
+ bool ipv4, bool ipv6, enum gft_profile_type profile_type)
+{
+ struct regpair ram_line;
+ u32 search_non_ip_as_gft;
+ u32 reg_val, cam_line;
+ u32 lo = 0, hi = 0;
+
+ if (!ipv6 && !ipv4)
+ DP_NOTICE(p_hwfn,
+ "gft_config: must accept at least on of - ipv4 or ipv6'\n");
+ if (!tcp && !udp)
+ DP_NOTICE(p_hwfn,
+ "gft_config: must accept at least on of - udp or tcp\n");
+ if (profile_type >= MAX_GFT_PROFILE_TYPE)
+ DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n");
+
+ /* Set the RFS event ID to be awakened in Tstorm by the PRS */
+ reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
+ PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
+ reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
+
+ /* On a match, do not load the context in PRS, only the CID. */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
+
+ /* Do not use the tenant ID exist bit for the GFT search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
+
+ /* Set Cam */
+ cam_line = 0;
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
+
+ /* Filters are per PF!! */
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK,
+ GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
+
+ if (!(tcp && udp)) {
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
+ if (tcp)
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_TCP_PROTOCOL);
+ else
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
+ GFT_PROFILE_UDP_PROTOCOL);
+ }
+
+ if (!(ipv4 && ipv6)) {
+ SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
+ if (ipv4)
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV4);
+ else
+ SET_FIELD(cam_line,
+ GFT_CAM_LINE_MAPPED_IP_VERSION,
+ GFT_PROFILE_IPV6);
+ }
+
+ /* Write characteristics to cam */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
+ cam_line);
+ cam_line =
+ qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
+
+ /* Write the line to RAM - compare against the filter 4-tuple */
+
+ /* By default, do not search non-IP packets as GFT */
+ search_non_ip_as_gft = 0;
+
+ /* Tunnel type */
+ SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
+
+ if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
+ SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
+ SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) {
+ SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) {
+ SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1);
+ SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1);
+ } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
+ SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+
+ /* Allow tunneled traffic without inner IP */
+ search_non_ip_as_gft = 1;
+ }
+
+ ram_line.lo = cpu_to_le32(lo);
+ ram_line.hi = cpu_to_le32(hi);
+
+ qed_wr(p_hwfn,
+ p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
+ qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
+ sizeof(ram_line) / REG_SIZE);
+
+ /* Set default profile so that no filter match will happen */
+ ram_line.lo = cpu_to_le32(0xffffffff);
+ ram_line.hi = cpu_to_le32(0x3ff);
+ qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo,
+ PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
+ PRS_GFT_CAM_LINES_NO_MATCH,
+ sizeof(ram_line) / REG_SIZE);
+
+ /* Enable GFT search */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
+}
+
+DECLARE_CRC8_TABLE(cdu_crc8_table);
+
+/* Calculate and return CDU validation byte per connection type/region/cid */
+static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
+{
+ const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
+ u8 crc, validation_byte = 0;
+ static u8 crc8_table_valid; /* automatically initialized to 0 */
+ u32 validation_string = 0;
+ __be32 data_to_crc;
+
+ if (!crc8_table_valid) {
+ crc8_populate_msb(cdu_crc8_table, 0x07);
+ crc8_table_valid = 1;
+ }
+
+ /* The CRC is calculated on the String-to-compress:
+ * [31:8] = {CID[31:20],CID[11:0]}
+ * [7:4] = Region
+ * [3:0] = Type
+ */
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
+ validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
+ validation_string |= ((region & 0xF) << 4);
+
+ if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
+ validation_string |= (conn_type & 0xF);
+
+ /* Convert to big-endian and calculate CRC8 */
+ data_to_crc = cpu_to_be32(validation_string);
+ crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
+ CRC8_INIT_VALUE);
+
+ /* The validation byte [7:0] is composed:
+ * for type A validation
+ * [7] = active configuration bit
+ * [6:0] = crc[6:0]
+ *
+ * for type B validation
+ * [7] = active configuration bit
+ * [6:3] = connection_type[3:0]
+ * [2:0] = crc[2:0]
+ */
+ validation_byte |=
+ ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
+
+ if ((validation_cfg >>
+ CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
+ validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
+ else
+ validation_byte |= crc & 0x7F;
+
+ return validation_byte;
+}
+
+/* Calculate and set validation bytes for session context */
+void qed_calc_session_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 cid)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid);
+ *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid);
+ *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid);
+}
+
+/* Calculate and set validation bytes for task context */
+void qed_calc_task_ctx_validation(void *p_ctx_mem,
+ u16 ctx_size, u8 ctx_type, u32 tid)
+{
+ u8 *p_ctx, *region1_val_ptr;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid);
+}
+
+/* Memset session context to 0 while preserving validation bytes */
+void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
+ u8 x_val, t_val, u_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
+ t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
+ u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
+
+ x_val = *x_val_ptr;
+ t_val = *t_val_ptr;
+ u_val = *u_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *x_val_ptr = x_val;
+ *t_val_ptr = t_val;
+ *u_val_ptr = u_val;
+}
+
+/* Memset task context to 0 while preserving validation bytes */
+void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
+{
+ u8 *p_ctx, *region1_val_ptr;
+ u8 region1_val;
+
+ p_ctx = (u8 * const)p_ctx_mem;
+ region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
+
+ region1_val = *region1_val_ptr;
+
+ memset(p_ctx, 0, ctx_size);
+
+ *region1_val_ptr = region1_val;
+}
+
+/* Enable and configure context validation */
+void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 ctx_validation;
+
+ /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
+
+ /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
+
+ /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */
+ ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
+ qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
+}
+
+const char *qed_get_protocol_type_str(u32 protocol_type)
+{
+ if (protocol_type >= ARRAY_SIZE(s_protocol_types))
+ return "Invalid protocol type";
+
+ return s_protocol_types[protocol_type];
+}
+
+const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id)
+{
+ const char *ramrod_cmd_id_str;
+
+ if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids))
+ return "Invalid protocol type";
+
+ if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0]))
+ return "Invalid Ramrod command ID";
+
+ ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id];
+
+ if (!ramrod_cmd_id_str)
+ return "Invalid Ramrod command ID";
+
+ return ramrod_cmd_id_str;
+}
+
+static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+
+ default:
+ return 0;
+ }
+}
+
+void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 assert_level[NUM_STORMS])
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);
+
+ qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+ }
+}
+
+#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4)
+#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4)
+
+static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+ switch (storm_id) {
+ case 0:
+ return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ TSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 1:
+ return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ MSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 2:
+ return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ USTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 3:
+ return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ XSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 4:
+ return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ YSTORM_OVERLAY_BUF_ADDR_OFFSET;
+ case 5:
+ return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+ PSTORM_OVERLAY_BUF_ADDR_OFFSET;
+
+ default:
+ return 0;
+ }
+}
+
+struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
+ const u32 * const
+ fw_overlay_in_buf,
+ u32 buf_size_in_bytes)
+{
+ u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0;
+ struct phys_mem_desc *allocated_mem;
+
+ if (!buf_size)
+ return NULL;
+
+ allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc),
+ GFP_KERNEL);
+ if (!allocated_mem)
+ return NULL;
+
+ /* For each Storm, set physical address in RAM */
+ while (buf_offset < buf_size) {
+ struct phys_mem_desc *storm_mem_desc;
+ struct fw_overlay_buf_hdr *hdr;
+ u32 storm_buf_size;
+ u8 storm_id;
+
+ hdr =
+ (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset];
+ storm_buf_size = GET_FIELD(hdr->data,
+ FW_OVERLAY_BUF_HDR_BUF_SIZE);
+ storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ if (storm_id >= NUM_STORMS)
+ break;
+ storm_mem_desc = allocated_mem + storm_id;
+ storm_mem_desc->size = storm_buf_size * sizeof(u32);
+
+ /* Allocate physical memory for Storm's overlays buffer */
+ storm_mem_desc->virt_addr =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ &storm_mem_desc->phys_addr, GFP_KERNEL);
+ if (!storm_mem_desc->virt_addr)
+ break;
+
+ /* Skip overlays buffer header */
+ buf_offset += OVERLAY_HDR_SIZE_DWORDS;
+
+ /* Copy Storm's overlays buffer to allocated memory */
+ memcpy(storm_mem_desc->virt_addr,
+ &fw_overlay_in_buf[buf_offset], storm_mem_desc->size);
+
+ /* Advance to next Storm */
+ buf_offset += storm_buf_size;
+ }
+
+ /* If memory allocation has failed, free all allocated memory */
+ if (buf_offset < buf_size) {
+ qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
+ return NULL;
+ }
+
+ return allocated_mem;
+}
+
+void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct phys_mem_desc *fw_overlay_mem)
+{
+ u8 storm_id;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ u32 ram_addr, i;
+
+ /* Skip Storms with no FW overlays */
+ if (!storm_mem_desc->virt_addr)
+ continue;
+
+ /* Calculate overlay RAM GRC address of current PF */
+ ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) +
+ sizeof(dma_addr_t) * p_hwfn->rel_pf_id;
+
+ /* Write Storm's overlay physical address to RAM */
+ for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, ram_addr,
+ ((u32 *)&storm_mem_desc->phys_addr)[i]);
+ }
+}
+
+void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
+ struct phys_mem_desc **fw_overlay_mem)
+{
+ u8 storm_id;
+
+ if (!fw_overlay_mem || !(*fw_overlay_mem))
+ return;
+
+ for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+ struct phys_mem_desc *storm_mem_desc =
+ (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
+
+ /* Free Storm's physical memory */
+ if (storm_mem_desc->virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ storm_mem_desc->size,
+ storm_mem_desc->virt_addr,
+ storm_mem_desc->phys_addr);
+ }
+
+ /* Free allocated virtual memory */
+ kfree(*fw_overlay_mem);
+ *fw_overlay_mem = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
new file mode 100644
index 000000000..b3bf9899c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -0,0 +1,657 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+#define QED_INIT_MAX_POLL_COUNT 100
+#define QED_INIT_POLL_PERIOD_US 500
+
+static u32 pxp_global_win[] = {
+ 0,
+ 0,
+ 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
+ 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
+ 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
+ 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
+ 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
+ 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
+ 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
+ 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
+ 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
+ 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
+ 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
+ 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
+ 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
+ 0,
+ 0,
+ 0,
+ 0,
+};
+
+/* IRO Array */
+static const u32 iro_arr[] = {
+ 0x00000000, 0x00000000, 0x00080000,
+ 0x00004478, 0x00000008, 0x00080000,
+ 0x00003288, 0x00000088, 0x00880000,
+ 0x000058a8, 0x00000020, 0x00200000,
+ 0x00003188, 0x00000008, 0x00080000,
+ 0x00000b00, 0x00000008, 0x00040000,
+ 0x00000a80, 0x00000008, 0x00040000,
+ 0x00000000, 0x00000008, 0x00020000,
+ 0x00000080, 0x00000008, 0x00040000,
+ 0x00000084, 0x00000008, 0x00020000,
+ 0x00005798, 0x00000004, 0x00040000,
+ 0x00004e50, 0x00000000, 0x00780000,
+ 0x00003e40, 0x00000000, 0x00780000,
+ 0x00004500, 0x00000000, 0x00780000,
+ 0x00003210, 0x00000000, 0x00780000,
+ 0x00003b50, 0x00000000, 0x00780000,
+ 0x00007f58, 0x00000000, 0x00780000,
+ 0x00005fd8, 0x00000000, 0x00080000,
+ 0x00007100, 0x00000000, 0x00080000,
+ 0x0000af20, 0x00000000, 0x00080000,
+ 0x00004398, 0x00000000, 0x00080000,
+ 0x0000a5a0, 0x00000000, 0x00080000,
+ 0x0000bde8, 0x00000000, 0x00080000,
+ 0x00000020, 0x00000004, 0x00040000,
+ 0x00005688, 0x00000010, 0x00100000,
+ 0x0000c210, 0x00000030, 0x00300000,
+ 0x0000b108, 0x00000038, 0x00380000,
+ 0x00003d20, 0x00000080, 0x00400000,
+ 0x0000bf60, 0x00000000, 0x00040000,
+ 0x00004560, 0x00040080, 0x00040000,
+ 0x000001f8, 0x00000004, 0x00040000,
+ 0x00003d60, 0x00000080, 0x00200000,
+ 0x00008960, 0x00000040, 0x00300000,
+ 0x0000e840, 0x00000060, 0x00600000,
+ 0x00004698, 0x00000080, 0x00380000,
+ 0x000107b8, 0x000000c0, 0x00c00000,
+ 0x000001f8, 0x00000002, 0x00020000,
+ 0x0000a260, 0x00000000, 0x01080000,
+ 0x0000a368, 0x00000008, 0x00080000,
+ 0x000001c0, 0x00000008, 0x00080000,
+ 0x000001f8, 0x00000008, 0x00080000,
+ 0x00000ac0, 0x00000008, 0x00080000,
+ 0x00002578, 0x00000008, 0x00080000,
+ 0x000024f8, 0x00000008, 0x00080000,
+ 0x00000280, 0x00000008, 0x00080000,
+ 0x00000680, 0x00080018, 0x00080000,
+ 0x00000b78, 0x00080018, 0x00020000,
+ 0x0000c600, 0x00000058, 0x003c0000,
+ 0x00012038, 0x00000020, 0x00100000,
+ 0x00011b00, 0x00000048, 0x00180000,
+ 0x00009650, 0x00000050, 0x00200000,
+ 0x00008b10, 0x00000040, 0x00280000,
+ 0x000116c0, 0x00000018, 0x00100000,
+ 0x0000c808, 0x00000048, 0x00380000,
+ 0x00011790, 0x00000020, 0x00200000,
+ 0x000046d0, 0x00000080, 0x00100000,
+ 0x00003618, 0x00000010, 0x00100000,
+ 0x0000a9e8, 0x00000008, 0x00010000,
+ 0x000097a0, 0x00000008, 0x00010000,
+ 0x00011a10, 0x00000008, 0x00010000,
+ 0x0000e9f8, 0x00000008, 0x00010000,
+ 0x00012648, 0x00000008, 0x00010000,
+ 0x000121c8, 0x00000008, 0x00010000,
+ 0x0000af08, 0x00000030, 0x00100000,
+ 0x0000d748, 0x00000028, 0x00280000,
+ 0x00009e68, 0x00000018, 0x00180000,
+ 0x00009fe8, 0x00000008, 0x00080000,
+ 0x00013ea8, 0x00000008, 0x00080000,
+ 0x00012f18, 0x00000018, 0x00180000,
+ 0x0000dfe8, 0x00500288, 0x00100000,
+ 0x000131a0, 0x00000138, 0x00280000,
+};
+
+void qed_init_iro_array(struct qed_dev *cdev)
+{
+ cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
+}
+
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
+{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u!\n",
+ val, rt_offset);
+ return;
+ }
+
+ p_hwfn->rt_data.init_val[rt_offset] = val;
+ p_hwfn->rt_data.b_valid[rt_offset] = true;
+}
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset, u32 *p_val, size_t size)
+{
+ size_t i;
+
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u!\n",
+ rt_offset,
+ (u32)(rt_offset + size - 1));
+ return;
+ }
+
+ for (i = 0; i < size / sizeof(u32); i++) {
+ p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
+ p_hwfn->rt_data.b_valid[rt_offset + i] = true;
+ }
+}
+
+static int qed_init_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr, u16 rt_offset, u16 size, bool b_must_dmae)
+{
+ u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
+ bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
+ u16 i, j, segment;
+ int rc = 0;
+
+ /* Since not all RT entries are initialized, go over the RT and
+ * for each segment of initialized values use DMA.
+ */
+ for (i = 0; i < size; i++) {
+ if (!p_valid[i])
+ continue;
+
+ /* In case there isn't any wide-bus configuration here,
+ * simply write the data instead of using dmae.
+ */
+ if (!b_must_dmae) {
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
+ p_valid[i] = false;
+ continue;
+ }
+
+ /* Start of a new segment */
+ for (segment = 1; i + segment < size; segment++)
+ if (!p_valid[i + segment])
+ break;
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(p_init_val + i),
+ addr + (i << 2), segment, NULL);
+ if (rc)
+ return rc;
+
+ /* invalidate after writing */
+ for (j = i; j < (u32)(i + segment); j++)
+ p_valid[j] = false;
+
+ /* Jump over the entire segment, including invalid entry */
+ i += segment;
+ }
+
+ return rc;
+}
+
+int qed_init_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rt_data *rt_data = &p_hwfn->rt_data;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool),
+ GFP_KERNEL);
+ if (!rt_data->b_valid)
+ return -ENOMEM;
+
+ rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32),
+ GFP_KERNEL);
+ if (!rt_data->init_val) {
+ kfree(rt_data->b_valid);
+ rt_data->b_valid = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qed_init_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->rt_data.init_val);
+ p_hwfn->rt_data.init_val = NULL;
+ kfree(p_hwfn->rt_data.b_valid);
+ p_hwfn->rt_data.b_valid = NULL;
+}
+
+static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr,
+ u32 dmae_data_offset,
+ u32 size,
+ const u32 *buf,
+ bool b_must_dmae,
+ bool b_can_dmae)
+{
+ int rc = 0;
+
+ /* Perform DMAE only for lengthy enough sections or for wide-bus */
+ if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
+ const u32 *data = buf + dmae_data_offset;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
+ } else {
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(buf + dmae_data_offset),
+ addr, size, NULL);
+ }
+
+ return rc;
+}
+
+static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr, u32 fill_count)
+{
+ static u32 zero_buffer[DMAE_MAX_RW_SIZE];
+ struct qed_dmae_params params = {};
+
+ memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);
+
+ /* Invoke the DMAE host->GRC API with:
+ * 1. the zero buffer as the (replicated) source,
+ * 2. addr as the GRC destination,
+ * 3. fill_count dwords to write.
+ */
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
+ return qed_dmae_host2grc(p_hwfn, p_ptt,
+ (uintptr_t)(&zero_buffer[0]),
+ addr, fill_count, &params);
+}
+
+static void qed_init_fill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 addr, u32 fill, u32 fill_count)
+{
+ u32 i;
+
+ for (i = 0; i < fill_count; i++, addr += sizeof(u32))
+ qed_wr(p_hwfn, p_ptt, addr, fill);
+}
+
+static int qed_init_cmd_array(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *cmd,
+ bool b_must_dmae, bool b_can_dmae)
+{
+ u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
+ u32 data = le32_to_cpu(cmd->data);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+
+ u32 offset, output_len, input_len, max_size;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ union init_array_hdr *hdr;
+ const u32 *array_data;
+ int rc = 0;
+ u32 size;
+
+ array_data = cdev->fw_data->arr_data;
+
+ hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
+ data = le32_to_cpu(hdr->raw.data);
+ switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
+ case INIT_ARR_ZIPPED:
+ offset = dmae_array_offset + 1;
+ input_len = GET_FIELD(data,
+ INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
+ max_size = MAX_ZIPPED_SIZE * 4;
+ memset(p_hwfn->unzip_buf, 0, max_size);
+
+ output_len = qed_unzip_data(p_hwfn, input_len,
+ (u8 *)&array_data[offset],
+ max_size, (u8 *)p_hwfn->unzip_buf);
+ if (output_len) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
+ output_len,
+ p_hwfn->unzip_buf,
+ b_must_dmae, b_can_dmae);
+ } else {
+ DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
+ rc = -EINVAL;
+ }
+ break;
+ case INIT_ARR_PATTERN:
+ {
+ u32 repeats = GET_FIELD(data,
+ INIT_ARRAY_PATTERN_HDR_REPETITIONS);
+ u32 i;
+
+ size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
+
+ for (i = 0; i < repeats; i++, addr += size << 2) {
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ if (rc)
+ break;
+ }
+ break;
+ }
+ case INIT_ARR_STANDARD:
+ size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
+ rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
+ dmae_array_offset + 1,
+ size, array_data,
+ b_must_dmae, b_can_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+/* init_ops write command */
+static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_write_op *p_cmd, bool b_can_dmae)
+{
+ u32 data = le32_to_cpu(p_cmd->data);
+ bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
+ u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
+ union init_write_args *arg = &p_cmd->args;
+ int rc = 0;
+
+ /* Sanitize */
+ if (b_must_dmae && !b_can_dmae) {
+ DP_NOTICE(p_hwfn,
+ "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
+ addr);
+ return -EINVAL;
+ }
+
+ switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
+ case INIT_SRC_INLINE:
+ data = le32_to_cpu(p_cmd->args.inline_val);
+ qed_wr(p_hwfn, p_ptt, addr, data);
+ break;
+ case INIT_SRC_ZEROS:
+ data = le32_to_cpu(p_cmd->args.zeros_count);
+ if (b_must_dmae || (b_can_dmae && (data >= 64)))
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
+ else
+ qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
+ break;
+ case INIT_SRC_ARRAY:
+ rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd,
+ b_must_dmae, b_can_dmae);
+ break;
+ case INIT_SRC_RUNTIME:
+ qed_init_rt(p_hwfn, p_ptt, addr,
+ le16_to_cpu(arg->runtime.offset),
+ le16_to_cpu(arg->runtime.size),
+ b_must_dmae);
+ break;
+ }
+
+ return rc;
+}
+
+static inline bool comp_eq(u32 val, u32 expected_val)
+{
+ return val == expected_val;
+}
+
+static inline bool comp_and(u32 val, u32 expected_val)
+{
+ return (val & expected_val) == expected_val;
+}
+
+static inline bool comp_or(u32 val, u32 expected_val)
+{
+ return (val | expected_val) > 0;
+}
+
+/* init_ops read/poll commands */
+static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct init_read_op *cmd)
+{
+ bool (*comp_check)(u32 val, u32 expected_val);
+ u32 delay = QED_INIT_POLL_PERIOD_US, val;
+ u32 data, addr, poll;
+ int i;
+
+ data = le32_to_cpu(cmd->op_data);
+ addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
+ poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+
+ if (poll == INIT_POLL_NONE)
+ return;
+
+ switch (poll) {
+ case INIT_POLL_EQ:
+ comp_check = comp_eq;
+ break;
+ case INIT_POLL_OR:
+ comp_check = comp_or;
+ break;
+ case INIT_POLL_AND:
+ comp_check = comp_and;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
+ cmd->op_data);
+ return;
+ }
+
+ data = le32_to_cpu(cmd->expected_val);
+ for (i = 0;
+ i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data);
+ i++) {
+ udelay(delay);
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ }
+
+ if (i == QED_INIT_MAX_POLL_COUNT) {
+ DP_ERR(p_hwfn,
+ "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
+ addr, le32_to_cpu(cmd->expected_val),
+ val, le32_to_cpu(cmd->op_data));
+ }
+}
+
+/* init_ops callbacks entry point */
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct init_callback_op *p_cmd)
+{
+ int rc;
+
+ switch (p_cmd->callback_id) {
+ case DMAE_READY_CB:
+ rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+ p_cmd->callback_id);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
+ u16 *p_offset, int modes)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ const u8 *modes_tree_buf;
+ u8 arg1, arg2, tree_val;
+
+ modes_tree_buf = cdev->fw_data->modes_tree_buf;
+ tree_val = modes_tree_buf[(*p_offset)++];
+ switch (tree_val) {
+ case INIT_MODE_OP_NOT:
+ return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
+ case INIT_MODE_OP_OR:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 | arg2;
+ case INIT_MODE_OP_AND:
+ arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes);
+ return arg1 & arg2;
+ default:
+ tree_val -= MAX_INIT_MODE_OPS;
+ return (modes & BIT(tree_val)) ? 1 : 0;
+ }
+}
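+
+/* qed_init_cmd_mode_match() above evaluates the modes tree as a
+ * prefix-notation expression over the 'modes' bitmap: each byte is either
+ * an operator (NOT/OR/AND) followed by its operand sub-expressions, or a
+ * leaf encoded as MAX_INIT_MODE_OPS + mode_bit. Illustration (assuming
+ * hypothetical mode bits 2 and 5): the sequence
+ * { INIT_MODE_OP_AND, MAX_INIT_MODE_OPS + 2, MAX_INIT_MODE_OPS + 5 }
+ * evaluates to 1 only when both BIT(2) and BIT(5) are set in 'modes'.
+ */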
+
+static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
+ struct init_if_mode_op *p_cmd, int modes)
+{
+ u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);
+
+ if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
+ return 0;
+ else
+ return GET_FIELD(le32_to_cpu(p_cmd->op_data),
+ INIT_IF_MODE_OP_CMD_OFFSET);
+}
+
+static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
+ u32 phase, u32 phase_id)
+{
+ u32 data = le32_to_cpu(p_cmd->phase_data);
+ u32 op_data = le32_to_cpu(p_cmd->op_data);
+
+ if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
+ (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
+ GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
+ return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
+ else
+ return 0;
+}
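+
+/* Both qed_init_cmd_mode() and qed_init_cmd_phase() return the number of
+ * subsequent init commands to skip: zero when the condition matches (the
+ * guarded commands are executed), or the CMD_OFFSET field when it does
+ * not, in which case qed_init_run() below advances cmd_num past them.
+ */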
+
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
+{
+ bool b_dmae = (phase != PHASE_ENGINE);
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 cmd_num, num_init_ops;
+ union init_op *init_ops;
+ int rc = 0;
+
+ num_init_ops = cdev->fw_data->init_ops_size;
+ init_ops = cdev->fw_data->init_ops;
+
+ p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
+ if (!p_hwfn->unzip_buf)
+ return -ENOMEM;
+
+ for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
+ union init_op *cmd = &init_ops[cmd_num];
+ u32 data = le32_to_cpu(cmd->raw.op_data);
+
+ switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
+ case INIT_OP_WRITE:
+ rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
+ b_dmae);
+ break;
+ case INIT_OP_READ:
+ qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
+ break;
+ case INIT_OP_IF_MODE:
+ cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
+ modes);
+ break;
+ case INIT_OP_IF_PHASE:
+ cmd_num += qed_init_cmd_phase(&cmd->if_phase,
+ phase, phase_id);
+ break;
+ case INIT_OP_DELAY:
+ /* qed_init_run is always invoked from
+ * sleep-able context
+ */
+ udelay(le32_to_cpu(cmd->delay.delay));
+ break;
+
+ case INIT_OP_CALLBACK:
+ rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+ if (phase == PHASE_ENGINE &&
+ cmd->callback.callback_id == DMAE_READY_CB)
+ b_dmae = true;
+ break;
+ }
+
+ if (rc)
+ break;
+ }
+
+ kfree(p_hwfn->unzip_buf);
+ p_hwfn->unzip_buf = NULL;
+ return rc;
+}
+
+void qed_gtt_init(struct qed_hwfn *p_hwfn)
+{
+ u32 gtt_base;
+ u32 i;
+
+ /* Set the global windows */
+ gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;
+
+ for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
+ if (pxp_global_win[i])
+ REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
+ pxp_global_win[i]);
+}
+
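+/* Layout note (as used below): the firmware image begins with an array of
+ * struct bin_buffer_hdr entries indexed by the BIN_BUF_INIT_* ids, each
+ * giving the byte offset (and length) of its section within the same
+ * blob; qed_init_fw_data() merely records pointers into that blob.
+ */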
+int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
+{
+ struct qed_fw_data *fw = cdev->fw_data;
+ struct bin_buffer_hdr *buf_hdr;
+ u32 offset, len;
+
+ if (!data) {
+ DP_NOTICE(cdev, "Invalid fw data\n");
+ return -EINVAL;
+ }
+
+ /* First Dword contains metadata and should be skipped */
+ buf_hdr = (struct bin_buffer_hdr *)data;
+
+ offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
+ fw->init_ops = (union init_op *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
+ fw->arr_data = (u32 *)(data + offset);
+
+ offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
+ fw->modes_tree_buf = (u8 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_CMD].length;
+ fw->init_ops_size = len / sizeof(struct init_raw_op);
+
+ offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
+ fw->fw_overlays = (u32 *)(data + offset);
+ len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
+ fw->fw_overlays_len = len;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
new file mode 100644
index 000000000..12e5c4e37
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_INIT_OPS_H
+#define _QED_INIT_OPS_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/**
+ * qed_init_iro_array(): init iro_arr.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_init_iro_array(struct qed_dev *cdev);
+
+/**
+ * qed_init_run(): Run the init-sequence.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @phase: Phase.
+ * @phase_id: Phase ID.
+ * @modes: Mode.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int qed_init_run(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ int phase,
+ int phase_id,
+ int modes);
+
+/**
+ * qed_init_alloc(): Allocate the RT array and store 'values' pointers.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int qed_init_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_init_free(): Free the init-time allocations of the HW function.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_init_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_init_store_rt_reg(): Store a configuration value in the RT array.
+ *
+ * @p_hwfn: HW device data.
+ * @rt_offset: RT offset.
+ * @val: Val.
+ *
+ * Return: Void.
+ */
+void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 val);
+
+#define STORE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+#define OVERWRITE_RT_REG(hwfn, offset, val) \
+ qed_init_store_rt_reg(hwfn, offset, val)
+
+void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
+ u32 rt_offset,
+ u32 *val,
+ size_t size);
+
+#define STORE_RT_REG_AGG(hwfn, offset, val) \
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
+
+/**
+ * qed_gtt_init(): Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_gtt_init(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
new file mode 100644
index 000000000..2661c483c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -0,0 +1,2423 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+struct qed_pi_info {
+ qed_int_comp_cb_t comp_cb;
+ void *cookie;
+};
+
+struct qed_sb_sp_info {
+ struct qed_sb_info sb_info;
+
+ /* per protocol index data */
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
+};
+
+enum qed_attention_type {
+ QED_ATTN_TYPE_ATTN,
+ QED_ATTN_TYPE_PARITY,
+};
+
+#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
+
+struct aeu_invert_reg_bit {
+ char bit_name[30];
+
+#define ATTENTION_PARITY (1 << 0)
+
+#define ATTENTION_LENGTH_MASK (0x00000ff0)
+#define ATTENTION_LENGTH_SHIFT (4)
+#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
+ ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
+#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
+ ATTENTION_PARITY)
+
+/* Multiple bits start with this offset */
+#define ATTENTION_OFFSET_MASK (0x000ff000)
+#define ATTENTION_OFFSET_SHIFT (12)
+
+#define ATTENTION_BB_MASK (0x00700000)
+#define ATTENTION_BB_SHIFT (20)
+#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT)
+#define ATTENTION_BB_DIFFERENT BIT(23)
+
+#define ATTENTION_CLEAR_ENABLE BIT(28)
+ unsigned int flags;
+
+ /* Callback to call if attention will be triggered */
+ int (*cb)(struct qed_hwfn *p_hwfn);
+
+ enum block_id block_index;
+};
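+
+/* Worked example of the flags encoding above: ATTENTION_PAR_INT is
+ * (2 << ATTENTION_LENGTH_SHIFT) | ATTENTION_PARITY = 0x21, i.e. a source
+ * spanning two consecutive AEU bits (parity + interrupt) with the parity
+ * flag set, so ATTENTION_LENGTH(0x21) yields 2.
+ */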
+
+struct aeu_invert_reg {
+ struct aeu_invert_reg_bit bits[32];
+};
+
+#define MAX_ATTN_GRPS (8)
+#define NUM_ATTN_REGS (9)
+
+/* Specific HW attention callbacks */
+static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
+
+ /* This might occur on certain instances; log it once, then mask it */
+ DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n",
+ tmp);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK,
+ 0xffffffff);
+
+ return 0;
+}
+
+#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1)
+#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf)
+#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff)
+#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
+static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_VALID);
+
+ if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) {
+ u32 addr, data, length;
+
+ addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
+ data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_DATA);
+ length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ PSWHST_REG_INCORRECT_ACCESS_LENGTH);
+
+ DP_INFO(p_hwfn->cdev,
+ "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n",
+ addr, length,
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_VF_VALID),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_CLIENT),
+ (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR),
+ (u8) GET_FIELD(data,
+ ATTENTION_INCORRECT_ACCESS_BYTE_EN),
+ data);
+ }
+
+ return 0;
+}
+
+#define QED_GRC_ATTENTION_VALID_BIT (1 << 0)
+#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff)
+#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0)
+#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23)
+#define QED_GRC_ATTENTION_MASTER_MASK (0xf)
+#define QED_GRC_ATTENTION_MASTER_SHIFT (24)
+#define QED_GRC_ATTENTION_PF_MASK (0xf)
+#define QED_GRC_ATTENTION_PF_SHIFT (0)
+#define QED_GRC_ATTENTION_VF_MASK (0xff)
+#define QED_GRC_ATTENTION_VF_SHIFT (4)
+#define QED_GRC_ATTENTION_PRIV_MASK (0x3)
+#define QED_GRC_ATTENTION_PRIV_SHIFT (14)
+#define QED_GRC_ATTENTION_PRIV_VF (0)
+static const char *attn_master_to_str(u8 master)
+{
+ switch (master) {
+ case 1: return "PXP";
+ case 2: return "MCP";
+ case 3: return "MSDM";
+ case 4: return "PSDM";
+ case 5: return "YSDM";
+ case 6: return "USDM";
+ case 7: return "TSDM";
+ case 8: return "XSDM";
+ case 9: return "DBU";
+ case 10: return "DMAE";
+ default:
+ return "Unknown";
+ }
+}
+
+static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ u32 tmp, tmp2;
+
+ /* We've already cleared the timeout interrupt register, so we learn
+ * of interrupts via the validity register
+ */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
+ if (!(tmp & QED_GRC_ATTENTION_VALID_BIT))
+ goto out;
+
+ /* Read the GRC timeout information */
+ tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
+ tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
+
+ DP_INFO(p_hwfn->cdev,
+ "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
+ tmp2, tmp,
+ (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
+ GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2,
+ attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)),
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_PF),
+ (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) ==
+ QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)",
+ GET_FIELD(tmp2, QED_GRC_ATTENTION_VF));
+
+out:
+ /* Regardless of anything else, clear the validity bit */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
+ return 0;
+}
+
+#define PGLUE_ATTENTION_VALID (1 << 29)
+#define PGLUE_ATTENTION_RD_VALID (1 << 26)
+#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf)
+#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19)
+#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff)
+#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21)
+#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1)
+#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23)
+#define PGLUE_ATTENTION_ICPL_VALID (1 << 23)
+#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
+#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
+
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool hw_init)
+{
+ char msg[256];
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_WR_DETAILS);
+
+ snprintf(msg, sizeof(msg),
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
+ tmp,
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));
+
+ if (hw_init)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+ else
+ DP_NOTICE(p_hwfn, "%s\n", msg);
+ }
+
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_RD_VALID) {
+ u32 addr_lo, addr_hi, details;
+
+ addr_lo = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
+ details = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_TX_ERR_RD_DETAILS);
+
+ DP_NOTICE(p_hwfn,
+ "Illegal read by chip from [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ GET_FIELD(details,
+ PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
+ tmp,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
+ GET_FIELD(tmp,
+ PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ }
+
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
+ if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
+ snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);
+
+ if (hw_init)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+ else
+ DP_NOTICE(p_hwfn, "%s\n", msg);
+ }
+
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
+ if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
+ u32 addr_hi, addr_lo;
+
+ addr_lo = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
+
+ DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n",
+ tmp, addr_hi, addr_lo);
+ }
+
+ tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
+ if (tmp & PGLUE_ATTENTION_ILT_VALID) {
+ u32 addr_hi, addr_lo, details;
+
+ addr_lo = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
+ addr_hi = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
+ details = qed_rd(p_hwfn, p_ptt,
+ PGLUE_B_REG_VF_ILT_ERR_DETAILS);
+
+ DP_NOTICE(p_hwfn,
+ "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
+ details, tmp, addr_hi, addr_lo);
+ }
+
+ /* Clear the indications */
+ qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2));
+
+ return 0;
+}
+
+static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
+}
+
+static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
+{
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT,
+ "FW assertion!\n");
+
+ /* Clear assert indications */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0);
+
+ return -EINVAL;
+}
+
+static int qed_general_attention_35(struct qed_hwfn *p_hwfn)
+{
+ DP_INFO(p_hwfn, "General attention 35!\n");
+
+ return 0;
+}
+
+#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff)
+#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
+#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0)
+#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f)
+#define QED_DORQ_ATTENTION_SIZE_SHIFT (16)
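+
+/* Per the masks/shifts above, DORQ_REG_DB_DROP_DETAILS packs the opaque
+ * FID in bits 15:0 and the doorbell size in bits 22:16; the size appears
+ * to be in 32-bit units, hence the '* 4' conversion to bytes when it is
+ * logged below.
+ */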
+
+#define QED_DB_REC_COUNT 1000
+#define QED_DB_REC_INTERVAL 100
+
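+/* With the defaults above, the flush loop below re-reads
+ * DORQ_REG_PF_USAGE_CNT up to QED_DB_REC_COUNT times with a
+ * QED_DB_REC_INTERVAL microsecond busy-wait between reads, i.e. it waits
+ * at most 1000 * 100us = ~100ms for the usage counter to drain (the same
+ * value reported in the timeout message).
+ */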
+static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 count = QED_DB_REC_COUNT;
+ u32 usage = 1;
+
+ /* Flush any pending (e)dpms as they may never arrive */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
+ /* Wait for the usage counter to reach zero or for the count to run out.
+ * This is necessary since EDPM doorbell transactions can take multiple
+ * 64b cycles and as such can "split" over the PCI. A doorbell drop can
+ * therefore happen with half an EDPM in the queue and the other half
+ * dropped. Another EDPM doorbell to the same address (from the doorbell
+ * recovery mechanism or from the doorbelling entity) could then have its
+ * first half dropped and its second half interpreted as a continuation
+ * of the first. To prevent such malformed doorbells from reaching the
+ * device, flush the queue before releasing the overflow sticky indication.
+ */
+ while (count-- && usage) {
+ usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
+ udelay(QED_DB_REC_INTERVAL);
+ }
+
+ /* should have been depleted by now */
+ if (usage) {
+ DP_NOTICE(p_hwfn->cdev,
+ "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
+ QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 attn_ovfl, cur_ovfl;
+ int rc;
+
+ attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+ &p_hwfn->db_recovery_info.overflow);
+ cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!cur_ovfl && !attn_ovfl)
+ return 0;
+
+ DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+ attn_ovfl, cur_ovfl);
+
+ if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+ }
+
+ /* Release overflow sticky indication (stop silently dropping everything) */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+
+ /* Repeat all last doorbells (doorbell drop recovery) */
+ qed_db_recovery_execute(p_hwfn);
+
+ return 0;
+}
+
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ u32 overflow;
+ int rc;
+
+ overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!overflow)
+ goto out;
+
+ /* Run PF doorbell recovery in next periodic handler */
+ set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+ if (!p_hwfn->db_bar_no_edpm) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ goto out;
+ }
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+ /* Schedule the handler even if overflow was not detected */
+ qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
+ if (int_sts == 0xdeadbeaf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "DORQ is being reset, skipping int_sts handler\n");
+
+ return 0;
+ }
+
+ /* int_sts may be zero since all PFs were interrupted for doorbell
+ * overflow but another one already handled it. We can abort here; if
+ * this PF also requires overflow recovery, it will be interrupted again.
+ * The masked almost-full indication may also be set. Ignore it.
+ */
+ if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
+ return 0;
+
+ DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
+ /* check if db_drop or overflow happened */
+ if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
+ /* Obtain data about db drop/overflow */
+ first_drop_reason = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_REASON) &
+ QED_DORQ_ATTENTION_REASON_MASK;
+ details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS);
+ address = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_ADDRESS);
+ all_drops_reason = qed_rd(p_hwfn, p_ptt,
+ DORQ_REG_DB_DROP_DETAILS_REASON);
+
+ /* Log info */
+ DP_NOTICE(p_hwfn->cdev,
+ "Doorbell drop occurred\n"
+ "Address\t\t0x%08x\t(second BAR address)\n"
+ "FID\t\t0x%04x\t\t(Opaque FID)\n"
+ "Size\t\t0x%04x\t\t(in bytes)\n"
+ "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
+ "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n",
+ address,
+ GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE),
+ GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
+ first_drop_reason, all_drops_reason);
+
+ /* Clear the doorbell drop details and prepare for next drop */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
+
+ /* Mark interrupt as handled (note: even if drop was due to a different
+ * reason than overflow we mark as handled)
+ */
+ qed_wr(p_hwfn,
+ p_ptt,
+ DORQ_REG_INT_STS_WR,
+ DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);
+
+ /* If there are no indications other than drop indications, success */
+ if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
+ DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
+ DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
+ return 0;
+ }
+
+ /* Some other indication was present - non recoverable */
+ DP_INFO(p_hwfn, "DORQ fatal attention\n");
+
+ return -EINVAL;
+}
+
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
+ p_hwfn->db_recovery_info.dorq_attn = true;
+ qed_dorq_attn_overflow(p_hwfn);
+
+ return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->db_recovery_info.dorq_attn)
+ goto out;
+
+ /* Call DORQ callback if the attention was missed */
+ qed_dorq_attn_cb(p_hwfn);
+out:
+ p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
+/* Instead of major changes to the data-structure, we use some 'special'
+ * identifiers for sources that changed meaning between adapters.
+ */
+enum aeu_invert_reg_special_type {
+ AEU_INVERT_REG_SPECIAL_CNIG_0,
+ AEU_INVERT_REG_SPECIAL_CNIG_1,
+ AEU_INVERT_REG_SPECIAL_CNIG_2,
+ AEU_INVERT_REG_SPECIAL_CNIG_3,
+ AEU_INVERT_REG_SPECIAL_MAX,
+};
+
+static struct aeu_invert_reg_bit
+aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
+ {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+ {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG},
+};
+
+/* Note: the aeu_invert_reg descriptors must be defined in the same bit
+ * order as the HW.
+ */
+static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
+ {
+ { /* After Invert 1 */
+ {"GPIO0 function%d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 2 */
+ {"PGLUE config_space", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE misc_flr", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PGLUE B RBC", ATTENTION_PAR_INT,
+ qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B},
+ {"PGLUE misc_mctp", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) |
+ (1 << ATTENTION_OFFSET_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"PCIE glue/PXP VPD %d",
+ (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS},
+ }
+ },
+
+ {
+ { /* After Invert 3 */
+ {"General Attention %d",
+ (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 4 */
+ {"General Attention 32", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_fw_assertion,
+ MAX_BLOCK_ID},
+ {"General Attention %d",
+ (2 << ATTENTION_LENGTH_SHIFT) |
+ (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID},
+ {"General Attention 35", ATTENTION_SINGLE |
+ ATTENTION_CLEAR_ENABLE, qed_general_attention_35,
+ MAX_BLOCK_ID},
+ {"NWS Parity",
+ ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
+ NULL, BLOCK_NWS},
+ {"NWS Interrupt",
+ ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
+ NULL, BLOCK_NWS},
+ {"NWM Parity",
+ ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
+ NULL, BLOCK_NWM},
+ {"NWM Interrupt",
+ ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
+ ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
+ NULL, BLOCK_NWM},
+ {"MCP CPU", ATTENTION_SINGLE,
+ qed_mcp_attn_cb, MAX_BLOCK_ID},
+ {"MCP Watchdog timer", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID},
+ {"AVS stop status ready", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"MSTAT per-path", ATTENTION_PAR_INT,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG},
+ {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB},
+ {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB},
+ {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB},
+ {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS},
+ }
+ },
+
+ {
+ { /* After Invert 5 */
+ {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC},
+ {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1},
+ {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2},
+ {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB},
+ {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF},
+ {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM},
+ {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM},
+ {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM},
+ {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM},
+ {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM},
+ {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM},
+ {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM},
+ {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM},
+ {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM},
+ {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM},
+ {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM},
+ }
+ },
+
+ {
+ { /* After Invert 6 */
+ {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM},
+ {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM},
+ {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM},
+ {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM},
+ {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM},
+ {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM},
+ {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM},
+ {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM},
+ {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM},
+ {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD},
+ {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD},
+ {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD},
+ {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD},
+ {"DORQ", ATTENTION_PAR_INT,
+ qed_dorq_attn_cb, BLOCK_DORQ},
+ {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG},
+ {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC},
+ }
+ },
+
+ {
+ { /* After Invert 7 */
+ {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC},
+ {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU},
+ {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE},
+ {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU},
+ {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID},
+ {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU},
+ {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU},
+ {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM},
+ {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC},
+ {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF},
+ {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF},
+ {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS},
+ {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC},
+ {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS},
+ {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE},
+ {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS},
+ {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ},
+ }
+ },
+
+ {
+ { /* After Invert 8 */
+ {"PSWRQ (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRQ2},
+ {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR},
+ {"PSWWR (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWWR2},
+ {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD},
+ {"PSWRD (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWRD2},
+ {"PSWHST", ATTENTION_PAR_INT,
+ qed_pswhst_attn_cb, BLOCK_PSWHST},
+ {"PSWHST (pci_clk)", ATTENTION_PAR_INT,
+ NULL, BLOCK_PSWHST2},
+ {"GRC", ATTENTION_PAR_INT,
+ qed_grc_attn_cb, BLOCK_GRC},
+ {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU},
+ {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI},
+ {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID},
+ {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS},
+ {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE,
+ NULL, BLOCK_PGLCS},
+ {"PERST_B assertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"PERST_B deassertion", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+
+ {
+ { /* After Invert 9 */
+ {"MCP Latched memory", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad cache", ATTENTION_SINGLE,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched ump_tx", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"MCP Latched scratchpad", ATTENTION_PAR,
+ NULL, MAX_BLOCK_ID},
+ {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT),
+ NULL, MAX_BLOCK_ID},
+ }
+ },
+};
+
+static struct aeu_invert_reg_bit *
+qed_int_aeu_translate(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ if (!QED_IS_BB(p_hwfn->cdev))
+ return p_bit;
+
+ if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
+ return p_bit;
+
+ return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
+ ATTENTION_BB_SHIFT];
+}
+
+static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_bit)
+{
+ return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags &
+ ATTENTION_PARITY);
+}
+
+#define ATTN_STATE_BITS (0xfff)
+#define ATTN_BITS_MASKABLE (0x3ff)
+struct qed_sb_attn_info {
+ /* Virtual & Physical address of the SB */
+ struct atten_status_block *sb_attn;
+ dma_addr_t sb_phys;
+
+ /* Last seen running index */
+ u16 index;
+
+ /* A mask of the AEU bits resulting in a parity error */
+ u32 parity_mask[NUM_ATTN_REGS];
+
+ /* A pointer to the attention description structure */
+ struct aeu_invert_reg *p_aeu_desc;
+
+ /* Previously asserted attentions that have not yet been deasserted */
+ u16 known_attn;
+
+ /* Cleanup address for the link's general hw attention */
+ u32 mfw_attn_addr;
+};
+
+static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn,
+ struct qed_sb_attn_info *p_sb_desc)
+{
+ u16 rc = 0, index;
+
+ index = le16_to_cpu(p_sb_desc->sb_attn->sb_index);
+ if (p_sb_desc->index != index) {
+ p_sb_desc->index = index;
+ rc = QED_SB_ATT_IDX;
+ }
+
+ return rc;
+}
+
+/**
+ * qed_int_assertion() - Handle asserted attention bits.
+ *
+ * @p_hwfn: HW device data.
+ * @asserted_bits: Newly asserted bits.
+ *
+ * Return: Zero value.
+ */
+static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 igu_mask;
+
+ /* Mask the source of the attention in the IGU */
+ igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
+ igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
+ igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "inner known ATTN state: 0x%04x --> 0x%04x\n",
+ sb_attn_sw->known_attn,
+ sb_attn_sw->known_attn | asserted_bits);
+ sb_attn_sw->known_attn |= asserted_bits;
+
+ /* Handle MCP events */
+ if (asserted_bits & 0x100) {
+ qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
+ /* Clean the MCP attention */
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ sb_attn_sw->mfw_attn_addr, 0);
+ }
+
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_SET_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ (u32)asserted_bits);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n",
+ asserted_bits);
+
+ return 0;
+}
+
+static void qed_int_attn_print(struct qed_hwfn *p_hwfn,
+ enum block_id id,
+ enum dbg_attn_type type, bool b_clear)
+{
+ struct dbg_attn_block_result attn_results;
+ enum dbg_status status;
+
+ memset(&attn_results, 0, sizeof(attn_results));
+
+ status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type,
+ b_clear, &attn_results);
+ if (status != DBG_STATUS_OK)
+ DP_NOTICE(p_hwfn,
+ "Failed to parse attention information [status: %s]\n",
+ qed_dbg_get_status_str(status));
+ else
+ qed_dbg_parse_attn(p_hwfn, &attn_results);
+}
+
+/**
+ * qed_int_deassertion_aeu_bit() - Handles the effects of a single
+ * cause of the attention.
+ *
+ * @p_hwfn: HW device data.
+ * @p_aeu: Descriptor of an AEU bit which caused the attention.
+ * @aeu_en_reg: Register offset of the AEU enable reg. which configured
+ * this bit to this group.
+ * @p_bit_name: AEU bit description for logging purposes.
+ * @bitmask: Index of this bit in the aeu_en_reg.
+ *
+ * Return: Zero on success, negative errno otherwise.
+ */
+static int
+qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg,
+ const char *p_bit_name, u32 bitmask)
+{
+ bool b_fatal = false;
+ int rc = -EINVAL;
+ u32 val;
+
+ DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
+ p_bit_name, bitmask);
+
+ /* Call callback before clearing the interrupt status */
+ if (p_aeu->cb) {
+ DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
+ p_bit_name);
+ rc = p_aeu->cb(p_hwfn);
+ }
+
+ if (rc)
+ b_fatal = true;
+
+ /* Print HW block interrupt registers */
+ if (p_aeu->block_index != MAX_BLOCK_ID)
+ qed_int_attn_print(p_hwfn, p_aeu->block_index,
+ ATTN_TYPE_INTERRUPT, !b_fatal);
+
+ /* Raise a HW error notification if the attention is fatal */
+ if (b_fatal)
+ qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN,
+ "`%s': Fatal attention\n",
+ p_bit_name);
+ else /* If the attention is benign, no need to prevent it */
+ goto out;
+
+ /* Prevent this Attention from being asserted in the future */
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask));
+ DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
+ p_bit_name);
+
+ /* Re-enable FW assertion (Gen 32) interrupts */
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE4_IGU_OUT_0);
+ val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32;
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_ENABLE4_IGU_OUT_0, val);
+
+out:
+ return rc;
+}
+
+/**
+ * qed_int_deassertion_parity() - Handle a single parity AEU source.
+ *
+ * @p_hwfn: HW device data.
+ * @p_aeu: Descriptor of an AEU bit which caused the parity.
+ * @aeu_en_reg: Address of the AEU enable register.
+ * @bit_index: Index (0-31) of an AEU bit.
+ */
+static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn,
+ struct aeu_invert_reg_bit *p_aeu,
+ u32 aeu_en_reg, u8 bit_index)
+{
+ u32 block_id = p_aeu->block_index, mask, val;
+
+ DP_NOTICE(p_hwfn->cdev,
+ "%s parity attention is set [address 0x%08x, bit %d]\n",
+ p_aeu->bit_name, aeu_en_reg, bit_index);
+
+ if (block_id != MAX_BLOCK_ID) {
+ qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
+
+ /* In BB, there's a single parity bit for several blocks */
+ if (block_id == BLOCK_BTB) {
+ qed_int_attn_print(p_hwfn, BLOCK_OPTE,
+ ATTN_TYPE_PARITY, false);
+ qed_int_attn_print(p_hwfn, BLOCK_MCP,
+ ATTN_TYPE_PARITY, false);
+ }
+ }
+
+ /* Prevent this parity error from being re-asserted */
+ mask = ~BIT(bit_index);
+ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
+ DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
+ p_aeu->bit_name);
+}
+
+/**
+ * qed_int_deassertion() - Handle deassertion of previously asserted
+ * attentions.
+ *
+ * @p_hwfn: HW device data.
+ * @deasserted_bits: newly deasserted bits.
+ *
+ * Return: Zero value.
+ */
+static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
+ u16 deasserted_bits)
+{
+ struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
+ u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
+ u8 i, j, k, bit_idx;
+ int rc = 0;
+
+ /* Read the attention registers in the AEU */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt,
+ MISC_REG_AEU_AFTER_INVERT_1_IGU +
+ i * 0x4);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Deasserted bits [%d]: %08x\n",
+ i, aeu_inv_arr[i]);
+ }
+
+ /* Find parity attentions first */
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
+ u32 parities;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+
+ /* Skip register in which no parity bit is currently set */
+ parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
+ if (!parities)
+ continue;
+
+ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
+ struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
+
+ if (qed_int_is_parity_flag(p_hwfn, p_bit) &&
+ !!(parities & BIT(bit_idx)))
+ qed_int_deassertion_parity(p_hwfn, p_bit,
+ aeu_en, bit_idx);
+
+ bit_idx += ATTENTION_LENGTH(p_bit->flags);
+ }
+ }
+
+ /* Find non-parity cause for attention and act */
+ for (k = 0; k < MAX_ATTN_GRPS; k++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ /* Handle only groups whose attention is currently deasserted */
+ if (!(deasserted_bits & (1 << k)))
+ continue;
+
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ u32 bits;
+
+ aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
+ i * sizeof(u32) +
+ k * sizeof(u32) * NUM_ATTN_REGS;
+
+ en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
+ bits = aeu_inv_arr[i] & en;
+
+ /* Skip if no bit from this group is currently set */
+ if (!bits)
+ continue;
+
+ /* Find all set bits from current register which belong
+ * to current group, making them responsible for the
+ * previous assertion.
+ */
+ for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) {
+ unsigned long bitmask;
+ u8 bit, bit_len;
+
+ p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
+ p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu);
+
+ bit = bit_idx;
+ bit_len = ATTENTION_LENGTH(p_aeu->flags);
+ if (qed_int_is_parity_flag(p_hwfn, p_aeu)) {
+ /* Skip Parity */
+ bit++;
+ bit_len--;
+ }
+
+ bitmask = bits & (((1 << bit_len) - 1) << bit);
+ bitmask >>= bit;
+
+ if (bitmask) {
+ u32 flags = p_aeu->flags;
+ char bit_name[30];
+ u8 num;
+
+ num = (u8)find_first_bit(&bitmask,
+ bit_len);
+
+ /* Some bits represent more than a
+ * single interrupt. Correctly print
+ * their name.
+ */
+ if (ATTENTION_LENGTH(flags) > 2 ||
+ ((flags & ATTENTION_PAR_INT) &&
+ ATTENTION_LENGTH(flags) > 1))
+ snprintf(bit_name, 30,
+ p_aeu->bit_name, num);
+ else
+ strscpy(bit_name,
+ p_aeu->bit_name, 30);
+
+ /* We now need to pass bitmask in its
+ * correct position.
+ */
+ bitmask <<= bit;
+
+ /* Handle source of the attention */
+ qed_int_deassertion_aeu_bit(p_hwfn,
+ p_aeu,
+ aeu_en,
+ bit_name,
+ bitmask);
+ }
+
+ bit_idx += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ }
+ }
+
+ /* Handle missed DORQ attention */
+ qed_dorq_attn_handler(p_hwfn);
+
+ /* Clear IGU indication for the deasserted bits */
+ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ ((IGU_CMD_ATTN_BIT_CLR_UPPER -
+ IGU_CMD_INT_ACK_BASE) << 3),
+ ~((u32)deasserted_bits));
+
+ /* Unmask deasserted attentions in IGU */
+ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE);
+ aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
+ qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
+
+ /* Clear deassertion from inner state */
+ sb_attn_sw->known_attn &= ~deasserted_bits;
+
+ return rc;
+}
+
+static int qed_int_attentions(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
+ struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
+ u32 attn_bits = 0, attn_acks = 0;
+ u16 asserted_bits, deasserted_bits;
+ __le16 index;
+ int rc = 0;
+
+ /* Read current attention bits/acks - safeguard against racing attentions
+ * by guaranteeing we work on a synchronized snapshot
+ */
+ do {
+ index = p_sb_attn->sb_index;
+ /* finish reading index before the loop condition */
+ dma_rmb();
+ attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
+ attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
+ } while (index != p_sb_attn->sb_index);
+ p_sb_attn->sb_index = index;
+
+ /* Assertion / deassertion are meaningful (and in a correct state)
+ * only when they differ and are consistent with the known state:
+ * deassertion when a previous attention meets a current ack, and
+ * assertion when a current attention has no previous attention
+ */
+ asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
+ ~p_sb_attn_sw->known_attn;
+ deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
+ p_sb_attn_sw->known_attn;
+
+ if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) {
+ DP_INFO(p_hwfn,
+ "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
+ index, attn_bits, attn_acks, asserted_bits,
+ deasserted_bits, p_sb_attn_sw->known_attn);
+ } else if (asserted_bits == 0x100) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication via attention\n");
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication [deassertion]\n");
+ }
+
+ if (asserted_bits) {
+ rc = qed_int_assertion(p_hwfn, asserted_bits);
+ if (rc)
+ return rc;
+ }
+
+ if (deasserted_bits)
+ rc = qed_int_deassertion(p_hwfn, deasserted_bits);
+
+ return rc;
+}
+
+static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
+ void __iomem *igu_addr, u32 ack_cons)
+{
+ u32 igu_ack;
+
+ igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
+ (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
+ (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
+ (IGU_SEG_ACCESS_ATTN <<
+ IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
+
+ DIRECT_REG_WR(igu_addr, igu_ack);
+
+ /* Both segments (interrupts & acks) are written to the same address;
+ * we need to guarantee that all commands are received in order by the HW.
+ */
+ barrier();
+}
+
+void qed_int_sp_dpc(struct tasklet_struct *t)
+{
+ struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
+ struct qed_pi_info *pi_info = NULL;
+ struct qed_sb_attn_info *sb_attn;
+ struct qed_sb_info *sb_info;
+ int arr_size;
+ u16 rc = 0;
+
+ if (!p_hwfn->p_sp_sb) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n");
+ return;
+ }
+
+ sb_info = &p_hwfn->p_sp_sb->sb_info;
+ arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
+ if (!sb_info) {
+ DP_ERR(p_hwfn->cdev,
+ "Status block is NULL - cannot ack interrupts\n");
+ return;
+ }
+
+ if (!p_hwfn->p_sb_attn) {
+ DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn");
+ return;
+ }
+ sb_attn = p_hwfn->p_sb_attn;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
+ p_hwfn, p_hwfn->my_id);
+
+ /* Disable ack for the default status block. Required for MSI-X and for
+ * INTA in non-mask mode; for INTA it does no harm.
+ */
+ qed_sb_ack(sb_info, IGU_INT_DISABLE, 0);
+
+ /* Gather Interrupts/Attentions information */
+ if (!sb_info->sb_virt) {
+ DP_ERR(p_hwfn->cdev,
+ "Interrupt Status block is NULL - cannot check for new interrupts!\n");
+ } else {
+ u32 tmp_index = sb_info->sb_ack;
+
+ rc = qed_sb_update_sb_idx(sb_info);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Interrupt indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_info->sb_ack);
+ }
+
+ if (!sb_attn || !sb_attn->sb_attn) {
+ DP_ERR(p_hwfn->cdev,
+ "Attentions Status block is NULL - cannot check for new attentions!\n");
+ } else {
+ u16 tmp_index = sb_attn->index;
+
+ rc |= qed_attn_update_idx(p_hwfn, sb_attn);
+ DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR,
+ "Attention indices: 0x%08x --> 0x%08x\n",
+ tmp_index, sb_attn->index);
+ }
+
+ /* Check if we expect interrupts at this time; if not, just ack them */
+ if (!(rc & QED_SB_EVENT_MASK)) {
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ /* Check the validity of the DPC PTT; if invalid, ack interrupts and bail */
+ if (!p_hwfn->p_dpc_ptt) {
+ DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n");
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+ return;
+ }
+
+ if (rc & QED_SB_ATT_IDX)
+ qed_int_attentions(p_hwfn);
+
+ if (rc & QED_SB_IDX) {
+ int pi;
+
+ /* Invoke the registered completion callbacks for each protocol index */
+ for (pi = 0; pi < arr_size; pi++) {
+ pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
+ if (pi_info->comp_cb)
+ pi_info->comp_cb(p_hwfn, pi_info->cookie);
+ }
+ }
+
+ if (sb_attn && (rc & QED_SB_ATT_IDX))
+ /* This should be done before the interrupts are enabled,
+ * since otherwise a new attention will be generated.
+ */
+ qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
+
+ qed_sb_ack(sb_info, IGU_INT_ENABLE, 1);
+}
+
+static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_attn)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_attn, p_sb->sb_phys);
+ kfree(p_sb);
+ p_hwfn->p_sb_attn = NULL;
+}
+
+static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+
+ memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
+
+ sb_info->index = 0;
+ sb_info->known_attn = 0;
+
+ /* Configure Attention Status Block in IGU */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
+ lower_32_bits(p_hwfn->p_sb_attn->sb_phys));
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
+ upper_32_bits(p_hwfn->p_sb_attn->sb_phys));
+}
+
+static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr)
+{
+ struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
+ int i, j, k;
+
+ sb_info->sb_attn = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ /* Set the pointer to the AEU descriptors */
+ sb_info->p_aeu_desc = aeu_descs;
+
+ /* Calculate Parity Masks */
+ memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
+ for (i = 0; i < NUM_ATTN_REGS; i++) {
+ /* j is array index, k is bit index */
+ for (j = 0, k = 0; k < 32 && j < 32; j++) {
+ struct aeu_invert_reg_bit *p_aeu;
+
+ p_aeu = &aeu_descs[i].bits[j];
+ if (qed_int_is_parity_flag(p_hwfn, p_aeu))
+ sb_info->parity_mask[i] |= 1 << k;
+
+ k += ATTENTION_LENGTH(p_aeu->flags);
+ }
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Attn Mask [Reg %d]: 0x%08x\n",
+ i, sb_info->parity_mask[i]);
+ }
+
+ /* Set the address of cleanup for the mcp attention */
+ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
+ MISC_REG_AEU_GENERAL_ATTN_0;
+
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+}
+
+static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_sb_attn_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
+ if (!p_sb)
+ return -ENOMEM;
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ SB_ATTN_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+
+ if (!p_virt) {
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Attention setup */
+ p_hwfn->p_sb_attn = p_sb;
+ qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
+
+ return 0;
+}
+
+/* coalescing timeout = timeset << (timer_res + 1) */
+#define QED_CAU_DEF_RX_USECS 24
+#define QED_CAU_DEF_TX_USECS 48
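+
+/* Rough illustration of the timer_res/timeset split used below: a
+ * coalescing value of, e.g., 200us does not fit in the 7-bit timeset, so
+ * timer_res becomes 1 and timeset becomes 200 >> 1 = 100 (<= 0x7F);
+ * values up to 0x7F keep timer_res 0, and values above 0xFF use 2.
+ */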
+
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id, u16 vf_number, u8 vf_valid)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 cau_state, params = 0, data = 0;
+ u8 timer_res;
+
+ memset(p_sb_entry, 0, sizeof(*p_sb_entry));
+
+ SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
+ SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
+ SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid);
+ SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
+ SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
+
+ cau_state = CAU_HC_DISABLE_STATE;
+
+ if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ cau_state = CAU_HC_ENABLE_STATE;
+ if (!cdev->rx_coalesce_usecs)
+ cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS;
+ if (!cdev->tx_coalesce_usecs)
+ cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
+ }
+
+ /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
+ if (cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+
+ SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+
+ SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ p_sb_entry->params = cpu_to_le32(params);
+
+ SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state);
+ SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state);
+ p_sb_entry->data = cpu_to_le32(data);
+}
+
+static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u32 pi_index,
+ enum qed_coalescing_fsm coalescing_fsm,
+ u8 timeset)
+{
+ u32 sb_offset, pi_offset;
+ u32 prod = 0;
+
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
+ SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
+ if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE)
+ SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0);
+ else
+ SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
+
+ sb_offset = igu_sb_id * PIS_PER_SB;
+ pi_offset = sb_offset + pi_index;
+
+ if (p_hwfn->hw_init_done)
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod);
+ else
+ STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
+ prod);
+}
+
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id, u16 vf_number, u8 vf_valid)
+{
+ struct cau_sb_entry sb_entry;
+
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
+ vf_number, vf_valid);
+
+ if (p_hwfn->hw_init_done) {
+ /* Wide-bus, initialize via DMAE */
+ u64 phys_addr = (u64)sb_phys;
+
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr,
+ CAU_REG_SB_ADDR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, NULL);
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_sb_id * sizeof(u64), 2, NULL);
+ } else {
+ /* Initialize Status Block Address */
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_phys);
+
+ STORE_RT_REG_AGG(p_hwfn,
+ CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
+ igu_sb_id * 2,
+ sb_entry);
+ }
+
+ /* Configure pi coalescing if set */
+ if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
+ u8 num_tc = p_hwfn->hw_info.num_hw_tc;
+ u8 timeset, timer_res;
+ u8 i;
+
+ /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
+ if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
+ qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
+ QED_COAL_RX_STATE_MACHINE, timeset);
+
+ if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
+ for (i = 0; i < num_tc; i++) {
+ qed_int_cau_conf_pi(p_hwfn, p_ptt,
+ igu_sb_id, TX_PI(i),
+ QED_COAL_TX_STATE_MACHINE,
+ timeset);
+ }
+ }
+}
+
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_sb_info *sb_info)
+{
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (IS_PF(p_hwfn->cdev))
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
+}
+
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf)
+{
+ struct qed_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !(p_block->status & QED_IGU_STATUS_FREE))
+ continue;
+
+ if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf)
+ return p_block;
+ }
+
+ return NULL;
+}
+
+static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id)
+{
+ struct qed_igu_block *p_block;
+ u16 igu_id;
+
+ for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ igu_id++) {
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ p_block->vector_number != vector_id)
+ continue;
+
+ return igu_id;
+ }
+
+ return QED_SB_INVALID_IDX;
+}
+
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ u16 igu_sb_id;
+
+ /* Assuming continuous set of IGU SBs dedicated for given PF */
+ if (sb_id == QED_SP_SB_ID)
+ igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ else if (IS_PF(p_hwfn->cdev))
+ igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
+ else
+ igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id == QED_SP_SB_ID)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
+
+ return igu_sb_id;
+}
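+
+/* Illustrative mapping: for a PF, client sb_id 0 resolves to the IGU CAM
+ * entry whose vector_number is 1, since vector 0 is reserved for the
+ * default (slowpath) SB that is returned for QED_SP_SB_ID.
+ */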
+
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id)
+{
+ sb_info->sb_virt = sb_virt_addr;
+ sb_info->sb_phys = sb_phy_addr;
+
+ sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+
+ if (sb_id != QED_SP_SB_ID) {
+ if (IS_PF(p_hwfn->cdev)) {
+ struct qed_igu_info *p_info;
+ struct qed_igu_block *p_block;
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ p_block->sb_info = sb_info;
+ p_block->status &= ~QED_IGU_STATUS_FREE;
+ p_info->usage.free_cnt--;
+ } else {
+ qed_vf_set_sb_info(p_hwfn, sb_id, sb_info);
+ }
+ }
+
+ sb_info->cdev = p_hwfn->cdev;
+
+ /* The igu address will hold the absolute address that needs to be
+ * written to for a specific status block
+ */
+ if (IS_PF(p_hwfn->cdev)) {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+ } else {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE +
+ sb_info->igu_sb_id) << 3);
+ }
+
+ sb_info->flags |= QED_SB_INFO_INIT;
+
+ qed_int_sb_setup(p_hwfn, p_ptt, sb_info);
+
+ return 0;
+}
+
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct qed_igu_block *p_block;
+ struct qed_igu_info *p_info;
+
+ if (!sb_info)
+ return 0;
+
+ /* zero status block and ack counter */
+ sb_info->sb_ack = 0;
+ memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
+
+ if (IS_VF(p_hwfn->cdev)) {
+ qed_vf_set_sb_info(p_hwfn, sb_id, NULL);
+ return 0;
+ }
+
+ p_info = p_hwfn->hw_info.p_igu_info;
+ p_block = &p_info->entry[sb_info->igu_sb_id];
+
+ /* Vector 0 is reserved for the Default SB */
+ if (!p_block->vector_number) {
+ DP_ERR(p_hwfn, "Do not free the SP SB using this function\n");
+ return -EINVAL;
+ }
+
+ /* Lose reference to client's SB info, and fix counters */
+ p_block->sb_info = NULL;
+ p_block->status |= QED_IGU_STATUS_FREE;
+ p_info->usage.free_cnt++;
+
+ return 0;
+}
+
+static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
+
+ if (!p_sb)
+ return;
+
+ if (p_sb->sb_info.sb_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ p_sb->sb_info.sb_virt,
+ p_sb->sb_info.sb_phys);
+ kfree(p_sb);
+ p_hwfn->p_sp_sb = NULL;
+}
+
+static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_sb_sp_info *p_sb;
+ dma_addr_t p_phys = 0;
+ void *p_virt;
+
+ /* SB struct */
+ p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL);
+ if (!p_sb)
+ return -ENOMEM;
+
+ /* SB ring */
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ SB_ALIGNED_SIZE(p_hwfn),
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ kfree(p_sb);
+ return -ENOMEM;
+ }
+
+ /* Status Block setup */
+ p_hwfn->p_sp_sb = p_sb;
+ qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt,
+ p_phys, QED_SP_SB_ID);
+
+ memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
+
+ return 0;
+}
+
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie, u8 *sb_idx, __le16 **p_fw_cons)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+ int rc = -ENOMEM;
+ u8 pi;
+
+ /* Look for a free index */
+ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
+ if (p_sp_sb->pi_info_arr[pi].comp_cb)
+ continue;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
+ p_sp_sb->pi_info_arr[pi].cookie = cookie;
+ *sb_idx = pi;
+ *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
+ rc = 0;
+ break;
+ }
+
+ return rc;
+}
+
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi)
+{
+ struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
+
+ if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL)
+ return -ENOMEM;
+
+ p_sp_sb->pi_info_arr[pi].comp_cb = NULL;
+ p_sp_sb->pi_info_arr[pi].cookie = NULL;
+
+ return 0;
+}
+
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn)
+{
+ return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
+}
+
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
+
+ p_hwfn->cdev->int_mode = int_mode;
+ switch (p_hwfn->cdev->int_mode) {
+ case QED_INT_MODE_INTA:
+ igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSI:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+ break;
+
+ case QED_INT_MODE_MSIX:
+ igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
+ break;
+ case QED_INT_MODE_POLL:
+ break;
+ }
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
+}
+
+static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+
+ /* Configure AEU signal change to produce attentions */
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
+ qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
+
+ /* Unmask AEU signals toward IGU */
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+}
+
+int
+qed_int_igu_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_int_mode int_mode)
+{
+ int rc = 0;
+
+ qed_int_igu_enable_attn(p_hwfn, p_ptt);
+
+ if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_slowpath_irq_req(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+ return -EINVAL;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
+}
+
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->b_int_enabled = 0;
+
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
+}
+
+#define IGU_CLEANUP_SLEEP_LENGTH (1000)
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ bool cleanup_set, u16 opaque_fid)
+{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
+ u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
+ u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
+
+ /* Set the data field */
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
+ SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0);
+ SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
+
+ /* Set the control register */
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
+
+ barrier();
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
+
+ /* calculate where to read the status bit from */
+ sb_bit = 1 << (igu_sb_id % 32);
+ sb_bit_addr = igu_sb_id / 32 * sizeof(u32);
+
+ sb_bit_addr += IGU_REG_CLEANUP_STATUS_0;
+
+ /* Now wait for the command to complete */
+ do {
+ val = qed_rd(p_hwfn, p_ptt, sb_bit_addr);
+
+ if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
+ break;
+
+ usleep_range(5000, 10000);
+ } while (--sleep_cnt);
+
+ if (!sleep_cnt)
+ DP_NOTICE(p_hwfn,
+ "Timeout waiting for clear status 0x%08x [for sb %d]\n",
+ val, igu_sb_id);
+}
+
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id, u16 opaque, bool b_set)
+{
+ struct qed_igu_block *p_block;
+ int pi, i;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+
+ /* Set */
+ if (b_set)
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);
+
+ /* Clear */
+ qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);
+
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING +
+ ((igu_sb_id / 32) * 4));
+ if (val & BIT((igu_sb_id % 32)))
+ usleep_range(10, 20);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ igu_sb_id);
+
+ /* Clear the CAU for the SB */
+ for (pi = 0; pi < 12; pi++)
+ qed_wr(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
+}
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set, bool b_slowpath)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct qed_igu_block *p_block;
+ u16 igu_sb_id = 0;
+ u32 val = 0;
+
+ val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
+ val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
+ val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
+ qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
+
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID) ||
+ !p_block->is_pf ||
+ (p_block->status & QED_IGU_STATUS_DSB))
+ continue;
+
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+ }
+
+ if (b_slowpath)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ p_info->igu_dsb_id,
+ p_hwfn->hw_info.opaque_fid,
+ b_set);
+}
+
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ struct qed_igu_block *p_block;
+ int pf_sbs, vf_sbs;
+ u16 igu_sb_id;
+ u32 val, rval;
+
+ if (!RESC_NUM(p_hwfn, QED_SB)) {
+ p_info->b_allow_pf_vf_change = false;
+ } else {
+ /* Use the numbers the MFW has provided -
+ * don't forget the MFW accounts for the default SB as well.
+ */
+ p_info->b_allow_pf_vf_change = true;
+
+ if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) {
+ DP_INFO(p_hwfn,
+ "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
+ RESC_NUM(p_hwfn, QED_SB) - 1,
+ p_info->usage.cnt);
+ p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1;
+ }
+
+ if (IS_PF_SRIOV(p_hwfn)) {
+ u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ if (vfs != p_info->usage.iov_cnt)
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
+ p_info->usage.iov_cnt, vfs);
+
+ /* At this point we know the total number of SBs in the IGU and
+ * the number of PF SBs, so we can validate that enough remain
+ * for the VFs.
+ */
+ if (vfs > p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov - p_info->usage.cnt) {
+ DP_NOTICE(p_hwfn,
+ "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
+ p_info->usage.free_cnt +
+ p_info->usage.free_cnt_iov,
+ p_info->usage.cnt, vfs);
+ return -EINVAL;
+ }
+
+ /* Currently cap the number of VFs SBs by the
+ * number of VFs.
+ */
+ p_info->usage.iov_cnt = vfs;
+ }
+ }
+
+ /* Mark all SBs as free, now in the right PF/VFs division */
+ p_info->usage.free_cnt = p_info->usage.cnt;
+ p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
+ p_info->usage.orig = p_info->usage.cnt;
+ p_info->usage.iov_orig = p_info->usage.iov_cnt;
+
+ /* We now proceed to re-configure the IGU cam to reflect the initial
+ * configuration. We can start with the Default SB.
+ */
+ pf_sbs = p_info->usage.cnt;
+ vf_sbs = p_info->usage.iov_cnt;
+
+ for (igu_sb_id = p_info->igu_dsb_id;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ p_block = &p_info->entry[igu_sb_id];
+ val = 0;
+
+ if (!(p_block->status & QED_IGU_STATUS_VALID))
+ continue;
+
+ if (p_block->status & QED_IGU_STATUS_DSB) {
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_DSB;
+ } else if (pf_sbs) {
+ pf_sbs--;
+ p_block->function_id = p_hwfn->rel_pf_id;
+ p_block->is_pf = 1;
+ p_block->vector_number = p_info->usage.cnt - pf_sbs;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_FREE;
+ } else if (vf_sbs) {
+ p_block->function_id =
+ p_hwfn->cdev->p_iov_info->first_vf_in_pf +
+ p_info->usage.iov_cnt - vf_sbs;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+ vf_sbs--;
+ } else {
+ p_block->function_id = 0;
+ p_block->is_pf = 0;
+ p_block->vector_number = 0;
+ }
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
+ p_block->function_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
+ p_block->vector_number);
+
+ /* VF entries will be enabled when the VF is initialized */
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
+
+ rval = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+
+ if (rval != val) {
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * igu_sb_id, val);
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_INTR,
+ "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
+ igu_sb_id,
+ p_block->function_id,
+ p_block->is_pf,
+ p_block->vector_number, rval, val);
+ }
+ }
+
+ return 0;
+}
+
+static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 igu_sb_id)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
+ struct qed_igu_block *p_block;
+
+ p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
+
+ /* Fill the block information */
+ p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
+ p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
+ p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
+ p_block->igu_sb_id = igu_sb_id;
+}
+
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_igu_info *p_igu_info;
+ struct qed_igu_block *p_block;
+ u32 min_vf = 0, max_vf = 0;
+ u16 igu_sb_id;
+
+ p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
+ if (!p_hwfn->hw_info.p_igu_info)
+ return -ENOMEM;
+
+ p_igu_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Distinguish between existent and non-existent default SB */
+ p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX;
+
+ /* Find the range of VF ids whose SB belong to this PF */
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+
+ for (igu_sb_id = 0;
+ igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) {
+ /* Read current entry; Notice it might not belong to this PF */
+ qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
+ p_block = &p_igu_info->entry[igu_sb_id];
+
+ if ((p_block->is_pf) &&
+ (p_block->function_id == p_hwfn->rel_pf_id)) {
+ p_block->status = QED_IGU_STATUS_PF |
+ QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
+ p_igu_info->usage.cnt++;
+ } else if (!(p_block->is_pf) &&
+ (p_block->function_id >= min_vf) &&
+ (p_block->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ p_block->status = QED_IGU_STATUS_VALID |
+ QED_IGU_STATUS_FREE;
+
+ if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX)
+ p_igu_info->usage.iov_cnt++;
+ }
+
+ /* Mark the first entry belonging to the PF or its VFs
+ * as the default SB [we'll reset IGU prior to first usage].
+ */
+ if ((p_block->status & QED_IGU_STATUS_VALID) &&
+ (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) {
+ p_igu_info->igu_dsb_id = igu_sb_id;
+ p_block->status |= QED_IGU_STATUS_DSB;
+ }
+
+ /* limit number of prints by having each PF print only its
+ * entries with the exception of PF0 which would print
+ * everything.
+ */
+ if ((p_block->status & QED_IGU_STATUS_VALID) ||
+ (p_hwfn->abs_pf_id == 0)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
+ igu_sb_id, p_block->function_id,
+ p_block->is_pf, p_block->vector_number);
+ }
+ }
+
+ if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) {
+ DP_NOTICE(p_hwfn,
+ "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
+ p_igu_info->igu_dsb_id);
+ return -EINVAL;
+ }
+
+ /* All non-default SBs are considered free at this point */
+ p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
+ p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
+ p_igu_info->igu_dsb_id,
+ p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt);
+
+ return 0;
+}
+
+/**
+ * qed_int_igu_init_rt() - Initialize IGU runtime registers.
+ *
+ * @p_hwfn: HW device data.
+ */
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn)
+{
+ u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
+
+ STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
+}
+
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
+{
+ u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+ u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER -
+ IGU_CMD_INT_ACK_BASE;
+ u32 intr_status_hi = 0, intr_status_lo = 0;
+ u64 intr_status = 0;
+
+ intr_status_lo = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ lsb_igu_cmd_addr * 8);
+ intr_status_hi = REG_RD(p_hwfn,
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ msb_igu_cmd_addr * 8);
+ intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
+
+ return intr_status;
+}
+
+static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
+{
+ tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
+ p_hwfn->b_sp_dpc_enabled = true;
+}
+
+int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+void qed_int_free(struct qed_hwfn *p_hwfn)
+{
+ qed_int_sp_sb_free(p_hwfn);
+ qed_int_sb_attn_free(p_hwfn);
+}
+
+void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
+ qed_int_sb_attn_setup(p_hwfn, p_ptt);
+ qed_int_sp_dpc_setup(p_hwfn);
+}
+
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info)
+{
+ struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info;
+
+ if (!info || !p_sb_cnt_info)
+ return;
+
+ memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info));
+}
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ cdev->hwfns[i].b_int_requested = false;
+}
+
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable)
+{
+ cdev->attn_clr_en = clr_enable;
+}
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ struct cau_sb_entry sb_entry;
+ u32 params;
+ int rc;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return -EINVAL;
+ }
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ params = le32_to_cpu(sb_entry.params);
+
+ if (tx)
+ SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ sb_entry.params = cpu_to_le32(params);
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, NULL);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
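+
+/* Illustrative usage sketch (arbitrary example values): setting the Rx
+ * timer resolution of status block 5 to 1 could look like
+ *
+ *        rc = qed_int_set_timer_res(p_hwfn, p_ptt, 1, 5, false);
+ *
+ * The helper round-trips the cau_sb_entry through DMAE, so it is only
+ * valid once hw_init_done is set.
+ */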
+
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info)
+{
+ u16 sbid = p_sb->igu_sb_id;
+ u32 i;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (sbid >= NUM_OF_SBS(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4);
+ p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4);
+
+ for (i = 0; i < PIS_PER_SB; i++)
+ p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt,
+ CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
new file mode 100644
index 000000000..7e5127f61
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -0,0 +1,459 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_INT_H
+#define _QED_INT_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */
+#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
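+/* Illustrative sketch: qed_int_igu_enable_int() composes the PF
+ * configuration from these bits; e.g. MSI-X mode resolves to
+ *
+ *        IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN |
+ *        IGU_PF_CONF_MSI_MSIX_EN
+ *
+ * written to IGU_REG_PF_CONFIGURATION.
+ */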
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
+
+/* Igu control commands
+ */
+enum igu_ctrl_cmd {
+ IGU_CTRL_CMD_TYPE_RD,
+ IGU_CTRL_CMD_TYPE_WR,
+ MAX_IGU_CTRL_CMD
+};
+
+/* Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+ u32 ctrl_data;
+#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */
+#define IGU_CTRL_REG_FID_SHIFT 0
+#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */
+#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16
+#define IGU_CTRL_REG_RESERVED_MASK 0x1
+#define IGU_CTRL_REG_RESERVED_SHIFT 28
+#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */
+#define IGU_CTRL_REG_TYPE_SHIFT 31
+};
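+
+/* Illustrative sketch: qed_int_igu_cleanup_sb() composes such a control
+ * word with SET_FIELD(), e.g.
+ *
+ *        u32 ctrl = 0;
+ *
+ *        SET_FIELD(ctrl, IGU_CTRL_REG_PXP_ADDR, IGU_CMD_INT_ACK_BASE + sb_id);
+ *        SET_FIELD(ctrl, IGU_CTRL_REG_FID, opaque_fid);
+ *        SET_FIELD(ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
+ *
+ * before writing it to IGU_REG_COMMAND_REG_CTRL; sb_id and opaque_fid are
+ * placeholders for the caller's IGU SB index and opaque FID.
+ */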
+
+enum qed_coalescing_fsm {
+ QED_COAL_RX_STATE_MACHINE,
+ QED_COAL_TX_STATE_MACHINE
+};
+
+/**
+ * qed_int_igu_enable_int(): Enable device interrupts.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode to use.
+ *
+ * Return: Void.
+ */
+void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * qed_int_igu_disable_int(): Disable device interrupts.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ */
+void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_int_igu_read_sisr_reg(): Reads the single ISR multiple DPC
+ * register from the IGU.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: u64.
+ */
+u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
+
+#define QED_SP_SB_ID 0xffff
+/**
+ * qed_int_sb_init(): Initializes the sb_info structure.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Points to an uninitialized (but allocated) sb_info structure.
+ * @sb_virt_addr: SB Virtual address.
+ * @sb_phy_addr: SB Physical address.
+ * @sb_id: The sb_id to be used (zero based in driver);
+ * use QED_SP_SB_ID for the SP Status block.
+ *
+ * Return: int.
+ *
+ * Once the structure is initialized it can be passed to sb related functions.
+ */
+int qed_int_sb_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id);
+/**
+ * qed_int_sb_setup(): Setup the sb.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Initialized sb_info structure.
+ *
+ * Return: Void.
+ */
+void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_sb_info *sb_info);
+
+/**
+ * qed_int_sb_release(): Releases the sb_info structure.
+ *
+ * @p_hwfn: HW device data.
+ * @sb_info: Points to an allocated sb_info structure.
+ * @sb_id: The sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block).
+ *
+ * Return: int.
+ *
+ * Once the structure is released, its memory can be freed.
+ */
+int qed_int_sb_release(struct qed_hwfn *p_hwfn,
+ struct qed_sb_info *sb_info,
+ u16 sb_id);
+
+/**
+ * qed_int_sp_dpc(): To be called when an interrupt is received on the
+ * default status block.
+ *
+ * @t: Tasklet.
+ *
+ * Return: Void.
+ *
+ */
+void qed_int_sp_dpc(struct tasklet_struct *t);
+
+/**
+ * qed_int_get_num_sbs(): Get the number of status blocks configured
+ * for this function in the IGU.
+ *
+ * @p_hwfn: HW device data.
+ * @p_sb_cnt_info: Pointer to SB count info.
+ *
+ * Return: Void.
+ */
+void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * qed_int_disable_post_isr_release(): Performs the cleanup post ISR
+ * release. The API needs to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
+
+/**
+ * qed_int_attn_clr_enable(): Sets whether attentions are globally
+ * prevented from being reasserted, or whether each attention follows
+ * its own attributes.
+ *
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable
+ *
+ * Return: Void.
+ *
+ */
+void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
+
+/**
+ * qed_int_get_sb_dbg(): Read debug information regarding a given SB.
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource
+ * @p_sb: pointer to status block for which we want to get info
+ * @p_info: pointer to struct to fill with information regarding SB
+ *
+ * Return: 0 with status block info filled on success, otherwise return error
+ */
+int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info);
+
+/**
+ * qed_db_rec_handler(): Doorbell Recovery handler.
+ * Run doorbell recovery in case of PF overflow (and flush DORQ if
+ * needed).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+#define QED_CAU_DEF_RX_TIMER_RES 0
+#define QED_CAU_DEF_TX_TIMER_RES 0
+
+#define QED_SB_ATT_IDX 0x0001
+#define QED_SB_EVENT_MASK 0x0003
+
+#define SB_ALIGNED_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+
+#define QED_SB_INVALID_IDX 0xffff
+
+struct qed_igu_block {
+ u8 status;
+#define QED_IGU_STATUS_FREE 0x01
+#define QED_IGU_STATUS_VALID 0x02
+#define QED_IGU_STATUS_PF 0x04
+#define QED_IGU_STATUS_DSB 0x08
+
+ u8 vector_number;
+ u8 function_id;
+ u8 is_pf;
+
+ /* Index inside IGU [meant for back reference] */
+ u16 igu_sb_id;
+
+ struct qed_sb_info *sb_info;
+};
+
+struct qed_igu_info {
+ struct qed_igu_block entry[MAX_TOT_SB_PER_PATH];
+ u16 igu_dsb_id;
+
+ struct qed_sb_cnt_info usage;
+
+ bool b_allow_pf_vf_change;
+};
+
+/**
+ * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources
+ * provided by MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into
+ * an IGU sb-id
+ *
+ * @p_hwfn: HW device data.
+ * @sb_id: user provided sb_id.
+ *
+ * Return: An index inside IGU CAM where the SB resides.
+ */
+u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * qed_get_igu_free_sb(): Return a pointer to an unused valid SB
+ *
+ * @p_hwfn: HW device data.
+ * @b_is_pf: True iff we want a SB belonging to a PF.
+ *
+ * Return: Pointer to an igu_block, or NULL if none is available.
+ */
+struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
+ bool b_is_pf);
+
+void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_set,
+ bool b_slowpath);
+
+void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_int_igu_read_cam(): Reads the IGU CAM.
+ * This function needs to be called during hardware
+ * prepare. It reads the info from igu cam to know which
+ * status block is the default / base status block etc.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
+ void *cookie);
+/**
+ * qed_int_register_cb(): Register a callback function for the slowhwfn status block.
+ *
+ * @p_hwfn: HW device data.
+ * @comp_cb: Function to be called when there is an
+ * interrupt on the sp sb
+ * @cookie: Passed to the callback function
+ * @sb_idx: (OUT) parameter which gives the chosen index
+ * for this protocol.
+ * @p_fw_cons: Pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * Return: Int.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
+ */
+int qed_int_register_cb(struct qed_hwfn *p_hwfn,
+ qed_int_comp_cb_t comp_cb,
+ void *cookie,
+ u8 *sb_idx,
+ __le16 **p_fw_cons);
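+
+/* Illustrative usage sketch: a protocol module would typically pair the
+ * calls as
+ *
+ *        u8 sb_idx;
+ *        __le16 *p_fw_cons;
+ *
+ *        rc = qed_int_register_cb(p_hwfn, my_comp_cb, my_cookie,
+ *                                 &sb_idx, &p_fw_cons);
+ *        ...
+ *        qed_int_unregister_cb(p_hwfn, sb_idx);
+ *
+ * where my_comp_cb and my_cookie are hypothetical names for the caller's
+ * qed_int_comp_cb_t handler and its private context.
+ */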
+
+/**
+ * qed_int_unregister_cb(): Unregisters callback function from sp sb.
+ *
+ * @p_hwfn: HW device data.
+ * @pi: Producer Index.
+ *
+ * Return: Int.
+ *
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
+ */
+int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
+ u8 pi);
+
+/**
+ * qed_int_get_sp_sb_id(): Get the slowhwfn sb id.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: u16.
+ */
+u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_int_igu_init_pure_rt_single(): Status block cleanup.
+ * Should be called for each status
+ * block that will be used -> both PF / VF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @igu_sb_id: IGU status block id.
+ * @opaque: Opaque fid of the sb owner.
+ * @b_set: Set(1) / Clear(0).
+ *
+ * Return: Void.
+ */
+void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 igu_sb_id,
+ u16 opaque,
+ bool b_set);
+
+/**
+ * qed_int_cau_conf_sb(): Configure cau for a given status block.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_phys: SB Physical.
+ * @igu_sb_id: IGU status block id.
+ * @vf_number: VF number
+ * @vf_valid: VF valid or not.
+ *
+ * Return: Void.
+ */
+void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t sb_phys,
+ u16 igu_sb_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+/**
+ * qed_int_alloc(): QED interrupt alloc.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_int_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_int_free(): QED interrupt free.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_int_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_int_setup(): QED interrupt setup.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ */
+void qed_int_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_int_igu_enable(): Enable Interrupt & Attention for hw function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode.
+ *
+ * Return: Int.
+ */
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
+
+/**
+ * qed_init_cau_sb_entry(): Initialize CAU status block entry.
+ *
+ * @p_hwfn: HW device data.
+ * @p_sb_entry: Pointer to the SB entry.
+ * @pf_id: PF number.
+ * @vf_number: VF number
+ * @vf_valid: VF valid or not.
+ *
+ * Return: Void.
+ */
+void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
+ struct cau_sb_entry *p_sb_entry,
+ u8 pf_id,
+ u16 vf_number,
+ u8 vf_valid);
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
+
+#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
+
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool hw_init);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
new file mode 100644
index 000000000..3ccdd3b1d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_IRO_HSI_H
+#define _QED_IRO_HSI_H
+
+#include <linux/types.h>
+
+enum {
+ IRO_YSTORM_FLOW_CONTROL_MODE_GTT,
+ IRO_PSTORM_PKT_DUPLICATION_CFG,
+ IRO_TSTORM_PORT_STAT,
+ IRO_TSTORM_LL2_PORT_STAT,
+ IRO_TSTORM_PKT_DUPLICATION_CFG,
+ IRO_USTORM_VF_PF_CHANNEL_READY_GTT,
+ IRO_USTORM_FLR_FINAL_ACK_GTT,
+ IRO_USTORM_EQE_CONS_GTT,
+ IRO_USTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_USTORM_COMMON_QUEUE_CONS_GTT,
+ IRO_XSTORM_PQ_INFO,
+ IRO_XSTORM_INTEG_TEST_DATA,
+ IRO_YSTORM_INTEG_TEST_DATA,
+ IRO_PSTORM_INTEG_TEST_DATA,
+ IRO_TSTORM_INTEG_TEST_DATA,
+ IRO_MSTORM_INTEG_TEST_DATA,
+ IRO_USTORM_INTEG_TEST_DATA,
+ IRO_XSTORM_OVERLAY_BUF_ADDR,
+ IRO_YSTORM_OVERLAY_BUF_ADDR,
+ IRO_PSTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_OVERLAY_BUF_ADDR,
+ IRO_MSTORM_OVERLAY_BUF_ADDR,
+ IRO_USTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_LL2_RX_PRODS_GTT,
+ IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_USTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT,
+ IRO_MSTORM_QUEUE_STAT,
+ IRO_MSTORM_TPA_TIMEOUT_US,
+ IRO_MSTORM_ETH_VF_PRODS,
+ IRO_MSTORM_ETH_PF_PRODS_GTT,
+ IRO_MSTORM_ETH_PF_STAT,
+ IRO_USTORM_QUEUE_STAT,
+ IRO_USTORM_ETH_PF_STAT,
+ IRO_PSTORM_QUEUE_STAT,
+ IRO_PSTORM_ETH_PF_STAT,
+ IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT,
+ IRO_TSTORM_ETH_PRS_INPUT,
+ IRO_ETH_RX_RATE_LIMIT,
+ IRO_TSTORM_ETH_RSS_UPDATE_GTT,
+ IRO_XSTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_YSTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_GRQ_PROD,
+ IRO_TSTORM_SCSI_CMDQ_CONS_GTT,
+ IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_TSTORM_ISCSI_RX_STATS,
+ IRO_MSTORM_ISCSI_RX_STATS,
+ IRO_USTORM_ISCSI_RX_STATS,
+ IRO_XSTORM_ISCSI_TX_STATS,
+ IRO_YSTORM_ISCSI_TX_STATS,
+ IRO_PSTORM_ISCSI_TX_STATS,
+ IRO_TSTORM_FCOE_RX_STATS,
+ IRO_PSTORM_FCOE_TX_STATS,
+ IRO_PSTORM_RDMA_QUEUE_STAT,
+ IRO_TSTORM_RDMA_QUEUE_STAT,
+ IRO_XSTORM_RDMA_ASSERT_LEVEL,
+ IRO_YSTORM_RDMA_ASSERT_LEVEL,
+ IRO_PSTORM_RDMA_ASSERT_LEVEL,
+ IRO_TSTORM_RDMA_ASSERT_LEVEL,
+ IRO_MSTORM_RDMA_ASSERT_LEVEL,
+ IRO_USTORM_RDMA_ASSERT_LEVEL,
+ IRO_XSTORM_IWARP_RXMIT_STATS,
+ IRO_TSTORM_ROCE_EVENTS_STAT,
+ IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS,
+ IRO_YSTORM_ROCE_ERROR_STATS,
+ IRO_PSTORM_ROCE_DCQCN_SENT_STATS,
+ IRO_USTORM_ROCE_CQE_STATS,
+};
+
+/* Pstorm LiteL2 queue statistics */
+
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \
+ + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm LiteL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size)
+
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[IRO_ETH_RX_RATE_LIMIT].base \
+ + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \
+ (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \
+ + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1))
+#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone
+ * size mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[IRO_MSTORM_ETH_VF_PRODS].base \
+ + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \
+ + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size)
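+
+/* Illustrative arithmetic: for a hypothetical VF id 2 and VF queue 3 the
+ * offset macro above expands to base + 2 * m1 + 3 * m2, where the per-VF
+ * stride (m1) and per-queue stride (m2) come from the firmware-provided
+ * IRO array.
+ */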
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size)
+
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_MSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[IRO_MSTORM_QUEUE_STAT].size)
+
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \
+ + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size)
+
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_FCOE_TX_STATS].base \
+ + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size)
+
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg
+ * data type.
+ */
+#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1))
+#define PSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_PSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size)
+
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size)
+
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \
+ + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1))
+#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size)
+
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_FCOE_RX_STATS].base \
+ + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size)
+
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size)
+
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_LL2_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size)
+
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \
+ + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1))
+#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base)
+
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Tstorm LL2 packet duplication configuration.
+ * Use tstorm_pkt_dup_cfg data type.
+ */
+#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1))
+#define TSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size)
+
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \
+ + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \
+ + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1))
+#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1))
+#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_EQE_CONS_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1))
+#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1))
+#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size)
+
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_USTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_USTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size)
+
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size)
+
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_USTORM_ROCE_CQE_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_USTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_USTORM_TOE_CQ_PROD].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_TOE_GRQ_PROD].base \
+ + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size)
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \
+ + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1))
+#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \
+ (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size)
+
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \
+ + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[IRO_XSTORM_PQ_INFO].base \
+ + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size)
+
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base)
+#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size)
+
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size)
+
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_YSTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size)
+
+/* Per-chip offsets in iro_arr in dwords */
+#define E4_IRO_ARR_OFFSET 0
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
new file mode 100644
index 000000000..980e7289b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -0,0 +1,1415 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_iscsi_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_iro_hsi.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_reg_addr.h"
+
+struct qed_iscsi_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+
+ u8 layer_code;
+ u8 offl_flags;
+ u8 connect_mode;
+ u32 initial_ack;
+ dma_addr_t sq_pbl_addr;
+ struct qed_chain r2tq;
+ struct qed_chain xhq;
+ struct qed_chain uhq;
+
+ struct tcp_upload_params *tcp_upload_params_virt_addr;
+ dma_addr_t tcp_upload_params_phys_addr;
+ struct scsi_terminate_extra_params *queue_cnts_virt_addr;
+ dma_addr_t queue_cnts_phys_addr;
+ dma_addr_t syn_phy_addr;
+
+ u16 syn_ip_payload_length;
+ u8 local_mac[6];
+ u8 remote_mac[6];
+ u16 vlan_id;
+ u16 tcp_flags;
+ u8 ip_version;
+ u32 remote_ip[4];
+ u32 local_ip[4];
+ u8 ka_max_probe_cnt;
+ u8 dup_ack_theshold;
+ u32 rcv_next;
+ u32 snd_una;
+ u32 snd_next;
+ u32 snd_max;
+ u32 snd_wnd;
+ u32 rcv_wnd;
+ u32 snd_wl1;
+ u32 cwnd;
+ u32 ss_thresh;
+ u16 srtt;
+ u16 rtt_var;
+ u32 ts_recent;
+ u32 ts_recent_age;
+ u32 total_rt;
+ u32 ka_timeout_delta;
+ u32 rt_timeout_delta;
+ u8 dup_ack_cnt;
+ u8 snd_wnd_probe_cnt;
+ u8 ka_probe_cnt;
+ u8 rt_cnt;
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u32 initial_rcv_wnd;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 snd_wnd_scale;
+ u8 rcv_wnd_scale;
+ u16 da_timeout_value;
+ u8 ack_frequency;
+
+ u8 update_flag;
+ u8 default_cq;
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u32 exp_stat_sn;
+ u32 stat_sn;
+ u16 physical_q0;
+ u16 physical_q1;
+ u8 abortive_dsconnect;
+};
+
+static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ if (p_hwfn->p_iscsi_info->event_cb) {
+ struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+ return p_iscsi->event_cb(p_iscsi->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+ return -EINVAL;
+ }
+}
+
+static int
+qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr,
+ void *event_context, iscsi_event_cb_t async_event_cb)
+{
+ struct iscsi_init_ramrod_params *p_ramrod = NULL;
+ struct scsi_init_func_queues *p_queue = NULL;
+ struct qed_iscsi_pf_params *p_params = NULL;
+ struct iscsi_spe_func_init *p_init = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = 0;
+ u32 dval;
+ u16 val;
+ u8 i;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_init;
+ p_init = &p_ramrod->iscsi_init_spe;
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+ p_queue = &p_init->q_params;
+
+ /* Sanity */
+ if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) {
+ DP_ERR(p_hwfn,
+ "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n",
+ p_params->num_queues,
+ p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]);
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return -EINVAL;
+ }
+
+ val = p_params->half_way_close_timeout;
+ p_init->half_way_close_timeout = cpu_to_le16(val);
+ p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+ p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+ p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ll2_rx_queue_id =
+ p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
+ p_params->ll2_ooo_queue_id;
+
+ p_init->func_params.log_page_size = p_params->log_page_size;
+ val = p_params->num_tasks;
+ p_init->func_params.num_tasks = cpu_to_le16(val);
+ p_init->debug_mode.flags = p_params->debug_mode;
+
+ DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+ p_params->glbl_q_params_addr);
+
+ val = p_params->cq_num_entries;
+ p_queue->cq_num_entries = cpu_to_le16(val);
+ val = p_params->cmdq_num_entries;
+ p_queue->cmdq_num_entries = cpu_to_le16(val);
+ p_queue->num_queues = p_params->num_queues;
+ dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS];
+ p_queue->queue_relative_offset = (u8)dval;
+ p_queue->cq_sb_pi = p_params->gl_rq_pi;
+ p_queue->cmdq_sb_pi = p_params->gl_cmd_pi;
+
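+ /* Map each CQ/CmdQ pair to its interrupt status block in the IGU. */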
+ for (i = 0; i < p_params->num_queues; i++) {
+ val = qed_get_igu_sb_id(p_hwfn, i);
+ p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+ }
+
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+
+ DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ],
+ p_params->bdq_pbl_base_addr[BDQ_ID_RQ]);
+ p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] =
+ p_params->bdq_pbl_num_entries[BDQ_ID_RQ];
+ val = p_params->bdq_xoff_threshold[BDQ_ID_RQ];
+ p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
+ val = p_params->bdq_xon_threshold[BDQ_ID_RQ];
+ p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val);
+
+ DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA],
+ p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]);
+ p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] =
+ p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA];
+ val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA];
+ p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
+ val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA];
+ p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val);
+ val = p_params->rq_buffer_size;
+ p_queue->rq_buffer_size = cpu_to_le16(val);
+ if (p_params->is_target) {
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA])
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1);
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1);
+ } else {
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1);
+ }
+ p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
+ val = p_params->tx_sws_timer;
+ p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
+ p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
+
+ p_hwfn->p_iscsi_info->event_context = event_context;
+ p_hwfn->p_iscsi_info->event_cb = async_event_cb;
+
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
+ qed_iscsi_async_event);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_offload *p_ramrod = NULL;
+ struct tcp_offload_params_opt2 *p_tcp2 = NULL;
+ struct tcp_offload_params *p_tcp = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ dma_addr_t r2tq_pbl_addr;
+ dma_addr_t xhq_pbl_addr;
+ dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
+ __le16 tmp;
+ int rc = 0;
+ u32 dval;
+ u16 wval;
+ u16 *p;
+ u8 i;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_offload;
+
+ /* Transmission PQ is the first of the PF */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = physical_q;
+ p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q);
+
+ /* iSCSI Pure-ACK PQ */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = physical_q;
+ p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+
+ DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
+
+ r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+ DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr);
+
+ xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+ DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr);
+
+ uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+ DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr);
+
+ p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack);
+ p_ramrod->iscsi.flags = p_conn->offl_flags;
+ p_ramrod->iscsi.default_cq = p_conn->default_cq;
+ p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn);
+
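+ /* Without the TCP-on-chip flag the full TCP connection state is passed
+ * to the firmware; with it, only the reduced option2 parameter set
+ * (including the received SYN buffer) is filled in below.
+ */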
+ if (!GET_FIELD(p_ramrod->iscsi.flags,
+ ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) {
+ p_tcp = &p_ramrod->tcp;
+
+ p = (u16 *)p_conn->local_mac;
+ tmp = cpu_to_le16(get_unaligned_be16(p));
+ p_tcp->local_mac_addr_hi = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+ p_tcp->local_mac_addr_mid = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+ p_tcp->local_mac_addr_lo = tmp;
+
+ p = (u16 *)p_conn->remote_mac;
+ tmp = cpu_to_le16(get_unaligned_be16(p));
+ p_tcp->remote_mac_addr_hi = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+ p_tcp->remote_mac_addr_mid = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+ p_tcp->remote_mac_addr_lo = tmp;
+
+ p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
+ p_tcp->ip_version = p_conn->ip_version;
+ for (i = 0; i < 4; i++) {
+ dval = p_conn->remote_ip[i];
+ p_tcp->remote_ip[i] = cpu_to_le32(dval);
+ dval = p_conn->local_ip[i];
+ p_tcp->local_ip[i] = cpu_to_le32(dval);
+ }
+ p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+ p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold;
+
+ p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next);
+ p_tcp->snd_una = cpu_to_le32(p_conn->snd_una);
+ p_tcp->snd_next = cpu_to_le32(p_conn->snd_next);
+ p_tcp->snd_max = cpu_to_le32(p_conn->snd_max);
+ p_tcp->snd_wnd = cpu_to_le32(p_conn->snd_wnd);
+ p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd);
+ p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1);
+ p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh);
+ p_tcp->srtt = cpu_to_le16(p_conn->srtt);
+ p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var);
+ p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent);
+ p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age);
+ p_tcp->total_rt = cpu_to_le32(p_conn->total_rt);
+ dval = p_conn->ka_timeout_delta;
+ p_tcp->ka_timeout_delta = cpu_to_le32(dval);
+ dval = p_conn->rt_timeout_delta;
+ p_tcp->rt_timeout_delta = cpu_to_le32(dval);
+ p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt;
+ p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt;
+ p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt;
+ p_tcp->rt_cnt = p_conn->rt_cnt;
+ p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+ p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ dval = p_conn->initial_rcv_wnd;
+ p_tcp->initial_rcv_wnd = cpu_to_le32(dval);
+ p_tcp->ttl = p_conn->ttl;
+ p_tcp->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp->mss = cpu_to_le16(p_conn->mss);
+ p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale;
+ p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ wval = p_conn->da_timeout_value;
+ p_tcp->da_timeout_value = cpu_to_le16(wval);
+ p_tcp->ack_frequency = p_conn->ack_frequency;
+ p_tcp->connect_mode = p_conn->connect_mode;
+ } else {
+ p_tcp2 =
+ &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp;
+
+ p = (u16 *)p_conn->local_mac;
+ tmp = cpu_to_le16(get_unaligned_be16(p));
+ p_tcp2->local_mac_addr_hi = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+ p_tcp2->local_mac_addr_mid = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+ p_tcp2->local_mac_addr_lo = tmp;
+
+ p = (u16 *)p_conn->remote_mac;
+ tmp = cpu_to_le16(get_unaligned_be16(p));
+ p_tcp2->remote_mac_addr_hi = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 1));
+ p_tcp2->remote_mac_addr_mid = tmp;
+ tmp = cpu_to_le16(get_unaligned_be16(p + 2));
+ p_tcp2->remote_mac_addr_lo = tmp;
+
+ p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
+ p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
+
+ p_tcp2->ip_version = p_conn->ip_version;
+ for (i = 0; i < 4; i++) {
+ dval = p_conn->remote_ip[i];
+ p_tcp2->remote_ip[i] = cpu_to_le32(dval);
+ dval = p_conn->local_ip[i];
+ p_tcp2->local_ip[i] = cpu_to_le32(dval);
+ }
+
+ p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp2->ttl = p_conn->ttl;
+ p_tcp2->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp2->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp2->mss = cpu_to_le16(p_conn->mss);
+ p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ p_tcp2->connect_mode = p_conn->connect_mode;
+ wval = p_conn->syn_ip_payload_length;
+ p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
+ p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
+ p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+ p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+ p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_conn_update_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+ u32 dval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_update;
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->flags = p_conn->update_flag;
+ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+ dval = p_conn->max_recv_pdu_length;
+ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->max_send_pdu_length;
+ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->first_seq_length;
+ p_ramrod->first_seq_length = cpu_to_le32(dval);
+ p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_mac_update *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u8 ucval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_MAC_UPDATE,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
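+ /* The remote MAC is split into three 16-bit words (hi/mid/lo) with the
+ * two bytes of each word swapped, hence the per-byte copy below.
+ */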
+ ucval = p_conn->remote_mac[1];
+ ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
+ ucval = p_conn->remote_mac[0];
+ ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval;
+ ucval = p_conn->remote_mac[3];
+ ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval;
+ ucval = p_conn->remote_mac[2];
+ ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval;
+ ucval = p_conn->remote_mac[5];
+ ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval;
+ ucval = p_conn->remote_mac[4];
+ ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct iscsi_spe_conn_termination *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_TERMINATION_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
+
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+ DMA_REGPAIR_LE(p_ramrod->query_params_addr,
+ p_conn->tcp_upload_params_phys_addr);
+ DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_CLEAR_SQ,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ISCSI_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
+ return rc;
+}
+
+static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
+}
+
+static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
+ u8 bdq_id)
+{
+ if (RESC_NUM(p_hwfn, QED_BDQ)) {
+ return (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
+ } else {
+ DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
+ return NULL;
+ }
+}
+
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
+{
+ if (!p_conn->queue_cnts_virt_addr)
+ goto nomem;
+ memset(p_conn->queue_cnts_virt_addr, 0,
+ sizeof(*p_conn->queue_cnts_virt_addr));
+
+ if (!p_conn->tcp_upload_params_virt_addr)
+ goto nomem;
+ memset(p_conn->tcp_upload_params_virt_addr, 0,
+ sizeof(*p_conn->tcp_upload_params_virt_addr));
+
+ if (!p_conn->r2tq.p_virt_addr)
+ goto nomem;
+ qed_chain_pbl_zero_mem(&p_conn->r2tq);
+
+ if (!p_conn->uhq.p_virt_addr)
+ goto nomem;
+ qed_chain_pbl_zero_mem(&p_conn->uhq);
+
+ if (!p_conn->xhq.p_virt_addr)
+ goto nomem;
+ qed_chain_pbl_zero_mem(&p_conn->xhq);
+
+ return 0;
+nomem:
+ return -ENOMEM;
+}
+
+static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn **p_out_conn)
+{
+ struct scsi_terminate_extra_params *p_q_cnts = NULL;
+ struct qed_iscsi_pf_params *p_params = NULL;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ };
+ struct tcp_upload_params *p_tcp = NULL;
+ struct qed_iscsi_conn *p_conn = NULL;
+ int rc = 0;
+
+ /* Try finding a free connection that can be used */
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ if (!list_empty(&p_hwfn->p_iscsi_info->free_list))
+ p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+ struct qed_iscsi_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+ *p_out_conn = p_conn;
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+
+ /* Need to allocate a new connection */
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_q_cnts),
+ &p_conn->queue_cnts_phys_addr,
+ GFP_KERNEL);
+ if (!p_q_cnts)
+ goto nomem_queue_cnts_param;
+ p_conn->queue_cnts_virt_addr = p_q_cnts;
+
+ p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_tcp),
+ &p_conn->tcp_upload_params_phys_addr,
+ GFP_KERNEL);
+ if (!p_tcp)
+ goto nomem_upload_param;
+ p_conn->tcp_upload_params_virt_addr = p_tcp;
+
+ params.num_elems = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe);
+ params.elem_size = sizeof(struct iscsi_wqe);
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
+ if (rc)
+ goto nomem_r2tq;
+
+ params.num_elems = p_params->num_uhq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+ params.elem_size = sizeof(struct iscsi_uhqe);
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
+ if (rc)
+ goto nomem_uhq;
+
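+ /* The XHQ reuses the UHQ element count; only the element size differs. */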
+ params.elem_size = sizeof(struct iscsi_xhqe);
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
+ if (rc)
+ goto nomem;
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+ return 0;
+
+nomem:
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct tcp_upload_params),
+ p_conn->tcp_upload_params_virt_addr,
+ p_conn->tcp_upload_params_phys_addr);
+nomem_upload_param:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct scsi_terminate_extra_params),
+ p_conn->queue_cnts_virt_addr,
+ p_conn->queue_cnts_phys_addr);
+nomem_queue_cnts_param:
+ kfree(p_conn);
+
+ return -ENOMEM;
+}
+
+static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_in_conn,
+ struct qed_iscsi_conn **p_out_conn)
+{
+ struct qed_iscsi_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+ if (rc)
+ return rc;
+
+ /* Use input connection or allocate a new one */
+ if (p_in_conn)
+ p_conn = p_in_conn;
+ else
+ rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
+
+ if (!rc)
+ rc = qed_iscsi_setup_connection(p_conn);
+
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+ return rc;
+ }
+
+ p_conn->icid = icid;
+ p_conn->conn_id = (u16)icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
+}
+
+static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct tcp_upload_params),
+ p_conn->tcp_upload_params_virt_addr,
+ p_conn->tcp_upload_params_phys_addr);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct scsi_terminate_extra_params),
+ p_conn->queue_cnts_virt_addr,
+ p_conn->queue_cnts_phys_addr);
+ kfree(p_conn);
+}
+
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iscsi_info *p_iscsi_info;
+
+ p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL);
+ if (!p_iscsi_info)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_iscsi_info->free_list);
+
+ p_hwfn->p_iscsi_info = p_iscsi_info;
+ return 0;
+}
+
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn)
+{
+ spin_lock_init(&p_hwfn->p_iscsi_info->lock);
+}
+
+void qed_iscsi_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iscsi_conn *p_conn = NULL;
+
+ if (!p_hwfn->p_iscsi_info)
+ return;
+
+ while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+ struct qed_iscsi_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_iscsi_free_connection(p_hwfn, p_conn);
+ }
+ }
+
+ kfree(p_hwfn->p_iscsi_info);
+ p_hwfn->p_iscsi_info = NULL;
+}
+
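+/* The iSCSI statistics live in the per-storm (T/M/U/X/Y/P) SDM RAM areas;
+ * 64-bit counters are stored as hi/lo register pairs and recombined via
+ * HILO_64_REGPAIR below.
+ */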
+static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct tstorm_iscsi_stats_drv tstats;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->iscsi_rx_bytes_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt);
+ p_stats->iscsi_rx_packet_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt);
+ p_stats->iscsi_rx_new_ooo_isle_events_cnt =
+ HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt);
+ p_stats->iscsi_cmdq_threshold_cnt =
+ le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt);
+ p_stats->iscsi_rq_threshold_cnt =
+ le32_to_cpu(tstats.iscsi_rq_threshold_cnt);
+ p_stats->iscsi_immq_threshold_cnt =
+ le32_to_cpu(tstats.iscsi_immq_threshold_cnt);
+}
+
+static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct mstorm_iscsi_stats_drv mstats;
+ u32 mstats_addr;
+
+ memset(&mstats, 0, sizeof(mstats));
+ mstats_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats));
+
+ p_stats->iscsi_rx_dropped_pdus_task_not_valid =
+ HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid);
+}
+
+static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct ustorm_iscsi_stats_drv ustats;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->iscsi_rx_data_pdu_cnt =
+ HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt);
+ p_stats->iscsi_rx_r2t_pdu_cnt =
+ HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt);
+ p_stats->iscsi_rx_total_pdu_cnt =
+ HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt);
+}
+
+static void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct xstorm_iscsi_stats_drv xstats;
+ u32 xstats_addr;
+
+ memset(&xstats, 0, sizeof(xstats));
+ xstats_addr = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats));
+
+ p_stats->iscsi_tx_go_to_slow_start_event_cnt =
+ HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt);
+ p_stats->iscsi_tx_fast_retransmit_event_cnt =
+ HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt);
+}
+
+static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct ystorm_iscsi_stats_drv ystats;
+ u32 ystats_addr;
+
+ memset(&ystats, 0, sizeof(ystats));
+ ystats_addr = BAR0_MAP_REG_YSDM_RAM +
+ YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats));
+
+ p_stats->iscsi_tx_data_pdu_cnt =
+ HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt);
+ p_stats->iscsi_tx_r2t_pdu_cnt =
+ HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt);
+ p_stats->iscsi_tx_total_pdu_cnt =
+ HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt);
+}
+
+static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iscsi_stats *p_stats)
+{
+ struct pstorm_iscsi_stats_drv pstats;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->iscsi_tx_bytes_cnt =
+ HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt);
+ p_stats->iscsi_tx_packet_cnt =
+ HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt);
+}
+
+static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_stats *stats,
+ bool is_atomic)
+{
+ struct qed_ptt *p_ptt;
+
+ memset(stats, 0, sizeof(*stats));
+
+ p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EAGAIN;
+ }
+
+ _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats);
+
+ _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats);
+ _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+struct qed_hash_iscsi_con {
+ struct hlist_node node;
+ struct qed_iscsi_conn *con;
+};
+
+static int qed_fill_iscsi_dev_info(struct qed_dev *cdev,
+ struct qed_dev_iscsi_info *info)
+{
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+
+ info->primary_dbq_rq_addr =
+ qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ);
+ info->secondary_bdq_rq_addr =
+ qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ);
+
+ info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ);
+
+ return rc;
+}
+
+static void qed_register_iscsi_ops(struct qed_dev *cdev,
+ struct qed_iscsi_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.iscsi = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_iscsi_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || (hash_con->con->icid != handle))
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_iscsi_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "iscsi already stopped\n");
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop iscsi - not all connections were returned\n");
+ return -EINVAL;
+ }
+
+ /* Stop the iscsi */
+ rc = qed_sp_iscsi_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_iscsi_start(struct qed_dev *cdev,
+ struct qed_iscsi_tid *tasks,
+ void *event_context,
+ iscsi_event_cb_t async_event_cb)
+{
+ int rc;
+ struct qed_tid_mem *tid_info;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "iscsi already started\n");
+ return 0;
+ }
+
+ rc = qed_sp_iscsi_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL, event_context, async_event_cb);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start iscsi\n");
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (!tasks)
+ return 0;
+
+ tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+
+ if (!tid_info) {
+ qed_iscsi_stop(cdev);
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_iscsi_stop(cdev);
+ kfree(tid_info);
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_ISCSI * sizeof(u8 *));
+
+ kfree(tid_info);
+
+ return 0;
+}
+
+static int qed_iscsi_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_iscsi_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+ if (!hash_con)
+ return -ENOMEM;
+
+ /* Acquire the connection */
+ rc = qed_iscsi_acquire_connection(QED_AFFIN_HWFN(cdev), NULL,
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+
+ if (p_doorbell)
+ *p_doorbell = qed_iscsi_get_db_addr(QED_AFFIN_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_iscsi_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_iscsi_offload_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_offload *conn_info)
+{
+ struct qed_hash_iscsi_con *hash_con;
+ struct qed_iscsi_conn *con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ ether_addr_copy(con->local_mac, conn_info->src.mac);
+ ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+ memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+ memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+ con->local_port = conn_info->src.port;
+ con->remote_port = conn_info->dst.port;
+
+ con->layer_code = conn_info->layer_code;
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->initial_ack = conn_info->initial_ack;
+ con->vlan_id = conn_info->vlan_id;
+ con->tcp_flags = conn_info->tcp_flags;
+ con->ip_version = conn_info->ip_version;
+ con->default_cq = conn_info->default_cq;
+ con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+ con->dup_ack_theshold = conn_info->dup_ack_theshold;
+ con->rcv_next = conn_info->rcv_next;
+ con->snd_una = conn_info->snd_una;
+ con->snd_next = conn_info->snd_next;
+ con->snd_max = conn_info->snd_max;
+ con->snd_wnd = conn_info->snd_wnd;
+ con->rcv_wnd = conn_info->rcv_wnd;
+ con->snd_wl1 = conn_info->snd_wl1;
+ con->cwnd = conn_info->cwnd;
+ con->ss_thresh = conn_info->ss_thresh;
+ con->srtt = conn_info->srtt;
+ con->rtt_var = conn_info->rtt_var;
+ con->ts_recent = conn_info->ts_recent;
+ con->ts_recent_age = conn_info->ts_recent_age;
+ con->total_rt = conn_info->total_rt;
+ con->ka_timeout_delta = conn_info->ka_timeout_delta;
+ con->rt_timeout_delta = conn_info->rt_timeout_delta;
+ con->dup_ack_cnt = conn_info->dup_ack_cnt;
+ con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt;
+ con->ka_probe_cnt = conn_info->ka_probe_cnt;
+ con->rt_cnt = conn_info->rt_cnt;
+ con->flow_label = conn_info->flow_label;
+ con->ka_timeout = conn_info->ka_timeout;
+ con->ka_interval = conn_info->ka_interval;
+ con->max_rt_time = conn_info->max_rt_time;
+ con->initial_rcv_wnd = conn_info->initial_rcv_wnd;
+ con->ttl = conn_info->ttl;
+ con->tos_or_tc = conn_info->tos_or_tc;
+ con->remote_port = conn_info->remote_port;
+ con->local_port = conn_info->local_port;
+ con->mss = conn_info->mss;
+ con->snd_wnd_scale = conn_info->snd_wnd_scale;
+ con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+ con->da_timeout_value = conn_info->da_timeout_value;
+ con->ack_frequency = conn_info->ack_frequency;
+
+ /* Set default values on other connection fields */
+ con->offl_flags = 0x1;
+
+ return qed_sp_iscsi_conn_offload(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_update_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_iscsi_params_update *conn_info)
+{
+ struct qed_hash_iscsi_con *hash_con;
+ struct qed_iscsi_conn *con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ con->update_flag = conn_info->update_flag;
+ con->max_seq_size = conn_info->max_seq_size;
+ con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+ con->max_send_pdu_length = conn_info->max_send_pdu_length;
+ con->first_seq_length = conn_info->first_seq_length;
+ con->exp_stat_sn = conn_info->exp_stat_sn;
+
+ return qed_sp_iscsi_conn_update(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return qed_sp_iscsi_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_destroy_conn(struct qed_dev *cdev,
+ u32 handle, u8 abrt_conn)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ hash_con->con->abortive_dsconnect = abrt_conn;
+
+ return qed_sp_iscsi_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_iscsi_stats_context(struct qed_dev *cdev,
+ struct qed_iscsi_stats *stats,
+ bool is_atomic)
+{
+ return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic);
+}
+
+static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats)
+{
+ return qed_iscsi_stats_context(cdev, stats, false);
+}
+
+static int qed_iscsi_change_mac(struct qed_dev *cdev,
+ u32 handle, const u8 *mac)
+{
+ struct qed_hash_iscsi_con *hash_con;
+
+ hash_con = qed_iscsi_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+ return -EINVAL;
+ }
+
+ return qed_sp_iscsi_mac_update(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats,
+ bool is_atomic)
+{
+ struct qed_iscsi_stats proto_stats;
+
+ /* Retrieve FW statistics */
+ memset(&proto_stats, 0, sizeof(proto_stats));
+ if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE,
+ "Failed to collect ISCSI statistics\n");
+ return;
+ }
+
+ /* Translate FW statistics into struct */
+ stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt;
+ stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt;
+ stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt;
+ stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt;
+}
+
+static const struct qed_iscsi_ops qed_iscsi_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_iscsi_dev_info,
+ .register_ops = &qed_register_iscsi_ops,
+ .start = &qed_iscsi_start,
+ .stop = &qed_iscsi_stop,
+ .acquire_conn = &qed_iscsi_acquire_conn,
+ .release_conn = &qed_iscsi_release_conn,
+ .offload_conn = &qed_iscsi_offload_conn,
+ .update_conn = &qed_iscsi_update_conn,
+ .destroy_conn = &qed_iscsi_destroy_conn,
+ .clear_sq = &qed_iscsi_clear_conn_sq,
+ .get_stats = &qed_iscsi_stats,
+ .change_mac = &qed_iscsi_change_mac,
+};
+
+const struct qed_iscsi_ops *qed_get_iscsi_ops(void)
+{
+ return &qed_iscsi_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_iscsi_ops);
+
+void qed_put_iscsi_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_iscsi_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
new file mode 100644
index 000000000..974cb8d26
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_ISCSI_H
+#define _QED_ISCSI_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_iscsi_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+struct qed_iscsi_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+ u16 max_num_outstanding_tasks;
+ void *event_context;
+ iscsi_event_cb_t event_cb;
+};
+
+#if IS_ENABLED(CONFIG_QED_ISCSI)
+int qed_iscsi_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
+
+void qed_iscsi_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_get_protocol_stats_iscsi(): Fill the provided struct with
+ * iSCSI statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to the struct that will be filled with the statistics.
+ * @is_atomic: Hint from the caller - true if the function must not sleep.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
+void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats,
+ bool is_atomic);
+#else /* IS_ENABLED(CONFIG_QED_ISCSI) */
+static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {}
+
+static inline void
+qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
+ struct qed_mcp_iscsi_stats *stats,
+ bool is_atomic) {}
+#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
new file mode 100644
index 000000000..1d1d4caad
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -0,0 +1,3262 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include "qed_cxt.h"
+#include "qed_hw.h"
+#include "qed_ll2.h"
+#include "qed_rdma.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_ooo.h"
+
+#define QED_IWARP_ORD_DEFAULT 32
+#define QED_IWARP_IRD_DEFAULT 32
+#define QED_IWARP_MAX_FW_MSS 4120
+
+#define QED_EP_SIG 0xecabcdef
+
+struct mpa_v2_hdr {
+ __be16 ird;
+ __be16 ord;
+};
+
+#define MPA_V2_PEER2PEER_MODEL 0x8000
+#define MPA_V2_SEND_RTR 0x4000 /* on ird */
+#define MPA_V2_READ_RTR 0x4000 /* on ord */
+#define MPA_V2_WRITE_RTR 0x8000
+#define MPA_V2_IRD_ORD_MASK 0x3FFF
+
+#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED)
+
+#define QED_IWARP_INVALID_TCP_CID 0xffffffff
+
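+/* Default TCP receive window sizes, per chip family (BB/AH) and number of
+ * ports (2P/4P).
+ */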
+#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024)
+
+#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
+#define TIMESTAMP_HEADER_SIZE (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
+
+#define QED_IWARP_TS_EN BIT(0)
+#define QED_IWARP_DA_EN BIT(1)
+#define QED_IWARP_PARAM_CRC_NEEDED (1)
+#define QED_IWARP_PARAM_P2P (1)
+
+#define QED_IWARP_DEF_MAX_RT_TIME (0)
+#define QED_IWARP_DEF_CWND_FACTOR (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
+
+static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code);
+
+/* Override devinfo with iWARP specific values */
+void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ dev->max_inline = IWARP_REQ_MAX_INLINE_DATA_SIZE;
+ dev->max_qp = min_t(u32,
+ IWARP_MAX_QPS,
+ p_hwfn->p_rdma_info->num_qps) -
+ QED_IWARP_PREALLOC_CNT;
+
+ dev->max_cq = dev->max_qp;
+
+ dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT;
+ dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT;
+}
+
+void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP;
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+}
+
+/* We have two cid maps: one for TCP, used only from passive SYN processing
+ * when replacing a pre-allocated ep in the list, and a second for active
+ * TCP connections and for QPs.
+ */
+static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (cid < QED_IWARP_PREALLOC_CNT)
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+ cid);
+ else
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_ramrod_data *p_ramrod)
+{
+ p_ramrod->iwarp.ll2_ooo_q_index =
+ RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+ p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
+ p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
+ p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
+}
+
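+/* Allocate an absolute iwarp cid: the bitmap returns a protocol-relative id
+ * which is offset by the protocol's cid start, and an ILT line is then
+ * allocated dynamically for the new cid.
+ */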
+static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
+{
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n");
+ return rc;
+ }
+ *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid);
+ if (rc)
+ qed_iwarp_cid_cleaned(p_hwfn, *cid);
+
+ return rc;
+}
+
+static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+/* This function allocates a cid for passive TCP (called from SYN receive).
+ * It is separate from the regular cid allocation because these cids are
+ * assured to already have an ILT line allocated; they are preallocated to
+ * ensure that no memory allocation is needed during SYN processing.
+ */
+static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid)
+{
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tcp_cid_map, cid);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "can't allocate iwarp tcp cid max-count=%d\n",
+ p_hwfn->p_rdma_info->tcp_cid_map.max_count);
+
+ *cid = QED_IWARP_INVALID_TCP_CID;
+ return rc;
+ }
+
+ *cid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ return 0;
+}
+
+int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct iwarp_create_qp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u16 physical_queue;
+ u32 cid;
+ int rc;
+
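+ /* A single shared-queue DMA buffer holds both the SQ and RQ PBLs;
+ * the fixed offsets below expose them to the upper layer.
+ */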
+ qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ IWARP_SHARED_QUEUE_PAGE_SIZE,
+ &qp->shared_queue_phys_addr,
+ GFP_KERNEL);
+ if (!qp->shared_queue)
+ return -ENOMEM;
+
+ out_params->sq_pbl_virt = (u8 *)qp->shared_queue +
+ IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
+ out_params->sq_pbl_phys = qp->shared_queue_phys_addr +
+ IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET;
+ out_params->rq_pbl_virt = (u8 *)qp->shared_queue +
+ IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
+ out_params->rq_pbl_phys = qp->shared_queue_phys_addr +
+ IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET;
+
+ rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+ if (rc)
+ goto err1;
+
+ qp->icid = (u16)cid;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.cid = qp->icid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_CREATE_QP,
+ PROTOCOLID_IWARP, &init_data);
+ if (rc)
+ goto err2;
+
+ p_ramrod = &p_ent->ramrod.iwarp_create_qp;
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(p_ramrod->flags,
+ IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+ p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+ p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
+
+ p_ramrod->cq_cid_for_sq =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
+ p_ramrod->cq_cid_for_rq =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id);
+
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_ramrod->physical_q0 = cpu_to_le16(physical_queue);
+ physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_ramrod->physical_q1 = cpu_to_le16(physical_queue);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err2;
+
+ return rc;
+
+err2:
+ qed_iwarp_cid_cleaned(p_hwfn, cid);
+err1:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ IWARP_SHARED_QUEUE_PAGE_SIZE,
+ qp->shared_queue, qp->shared_queue_phys_addr);
+
+ return rc;
+}
+
+static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ struct iwarp_modify_qp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u16 flags, trans_to_state;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_MODIFY_QP,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.iwarp_modify_qp;
+
+ flags = le16_to_cpu(p_ramrod->flags);
+ SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1);
+ p_ramrod->flags = cpu_to_le16(flags);
+
+ if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING)
+ trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING;
+ else
+ trans_to_state = IWARP_MODIFY_QP_STATE_ERROR;
+
+ p_ramrod->transition_to_state = cpu_to_le16(trans_to_state);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc);
+
+ return rc;
+}
+
+enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state)
+{
+ switch (state) {
+ case QED_ROCE_QP_STATE_RESET:
+ case QED_ROCE_QP_STATE_INIT:
+ case QED_ROCE_QP_STATE_RTR:
+ return QED_IWARP_QP_STATE_IDLE;
+ case QED_ROCE_QP_STATE_RTS:
+ return QED_IWARP_QP_STATE_RTS;
+ case QED_ROCE_QP_STATE_SQD:
+ return QED_IWARP_QP_STATE_CLOSING;
+ case QED_ROCE_QP_STATE_ERR:
+ return QED_IWARP_QP_STATE_ERROR;
+ case QED_ROCE_QP_STATE_SQE:
+ return QED_IWARP_QP_STATE_TERMINATE;
+ default:
+ return QED_IWARP_QP_STATE_ERROR;
+ }
+}
+
+static enum qed_roce_qp_state
+qed_iwarp2roce_state(enum qed_iwarp_qp_state state)
+{
+ switch (state) {
+ case QED_IWARP_QP_STATE_IDLE:
+ return QED_ROCE_QP_STATE_INIT;
+ case QED_IWARP_QP_STATE_RTS:
+ return QED_ROCE_QP_STATE_RTS;
+ case QED_IWARP_QP_STATE_TERMINATE:
+ return QED_ROCE_QP_STATE_SQE;
+ case QED_IWARP_QP_STATE_CLOSING:
+ return QED_ROCE_QP_STATE_SQD;
+ case QED_IWARP_QP_STATE_ERROR:
+ return QED_ROCE_QP_STATE_ERR;
+ default:
+ return QED_ROCE_QP_STATE_ERR;
+ }
+}
+
+static const char * const iwarp_state_names[] = {
+ "IDLE",
+ "RTS",
+ "TERMINATE",
+ "CLOSING",
+ "ERROR",
+};
+
+int
+qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_iwarp_qp_state new_state, bool internal)
+{
+ enum qed_iwarp_qp_state prev_iw_state;
+ bool modify_fw = false;
+ int rc = 0;
+
+ /* Modify QP can be called from the upper layer or as a result of an
+ * async RST/FIN, therefore the state transition must be protected by
+ * the qp_lock.
+ */
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+ prev_iw_state = qp->iwarp_state;
+
+ if (prev_iw_state == new_state) {
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+ return 0;
+ }
+
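+ /* Only transitions requested by the upper layer (not internal ones)
+ * into CLOSING or ERROR are propagated to the firmware via a
+ * MODIFY_QP ramrod.
+ */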
+ switch (prev_iw_state) {
+ case QED_IWARP_QP_STATE_IDLE:
+ switch (new_state) {
+ case QED_IWARP_QP_STATE_RTS:
+ qp->iwarp_state = QED_IWARP_QP_STATE_RTS;
+ break;
+ case QED_IWARP_QP_STATE_ERROR:
+ qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
+ if (!internal)
+ modify_fw = true;
+ break;
+ default:
+ break;
+ }
+ break;
+ case QED_IWARP_QP_STATE_RTS:
+ switch (new_state) {
+ case QED_IWARP_QP_STATE_CLOSING:
+ if (!internal)
+ modify_fw = true;
+
+ qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING;
+ break;
+ case QED_IWARP_QP_STATE_ERROR:
+ if (!internal)
+ modify_fw = true;
+ qp->iwarp_state = QED_IWARP_QP_STATE_ERROR;
+ break;
+ default:
+ break;
+ }
+ break;
+ case QED_IWARP_QP_STATE_ERROR:
+ switch (new_state) {
+ case QED_IWARP_QP_STATE_IDLE:
+
+ qp->iwarp_state = new_state;
+ break;
+ case QED_IWARP_QP_STATE_CLOSING:
+ /* Could happen due to a race; do nothing. */
+ break;
+ default:
+ rc = -EINVAL;
+ }
+ break;
+ case QED_IWARP_QP_STATE_TERMINATE:
+ case QED_IWARP_QP_STATE_CLOSING:
+ qp->iwarp_state = new_state;
+ break;
+ default:
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n",
+ qp->icid,
+ iwarp_state_names[prev_iw_state],
+ iwarp_state_names[qp->iwarp_state],
+ internal ? "internal" : "");
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+
+ if (modify_fw)
+ rc = qed_iwarp_modify_fw(p_hwfn, qp);
+
+ return rc;
+}
+
+int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_DESTROY_QP,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc);
+
+ return rc;
+}
+
+static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ bool remove_from_active_list)
+{
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*ep->ep_buffer_virt),
+ ep->ep_buffer_virt, ep->ep_buffer_phys);
+
+ if (remove_from_active_list) {
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+
+ if (ep->qp)
+ ep->qp->ep = NULL;
+
+ kfree(ep);
+}
+
+int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ struct qed_iwarp_ep *ep = qp->ep;
+ int wait_count = 0;
+ int rc = 0;
+
+ if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) {
+ rc = qed_iwarp_modify_qp(p_hwfn, qp,
+ QED_IWARP_QP_STATE_ERROR, false);
+ if (rc)
+ return rc;
+ }
+
+ /* Make sure ep is closed before returning and freeing memory. */
+ if (ep) {
+ while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
+ wait_count++ < 200)
+ msleep(100);
+
+ if (ep->state != QED_IWARP_EP_CLOSED)
+ DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n",
+ ep->state);
+
+ qed_iwarp_destroy_ep(p_hwfn, ep, false);
+ }
+
+ rc = qed_iwarp_fw_destroy(p_hwfn, qp);
+
+ if (qp->shared_queue)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ IWARP_SHARED_QUEUE_PAGE_SIZE,
+ qp->shared_queue, qp->shared_queue_phys_addr);
+
+ return rc;
+}
+
+static int
+qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out)
+{
+ struct qed_iwarp_ep *ep;
+ int rc;
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->state = QED_IWARP_EP_INIT;
+
+ ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*ep->ep_buffer_virt),
+ &ep->ep_buffer_phys,
+ GFP_KERNEL);
+ if (!ep->ep_buffer_virt) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ ep->sig = QED_EP_SIG;
+
+ *ep_out = ep;
+
+ return 0;
+
+err:
+ kfree(ep);
+ return rc;
+}
+
+static void
+qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n",
+ p_tcp_ramrod->tcp.local_mac_addr_lo,
+ p_tcp_ramrod->tcp.local_mac_addr_mid,
+ p_tcp_ramrod->tcp.local_mac_addr_hi,
+ p_tcp_ramrod->tcp.remote_mac_addr_lo,
+ p_tcp_ramrod->tcp.remote_mac_addr_mid,
+ p_tcp_ramrod->tcp.remote_mac_addr_hi);
+
+ if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n",
+ p_tcp_ramrod->tcp.local_ip,
+ p_tcp_ramrod->tcp.local_port,
+ p_tcp_ramrod->tcp.remote_ip,
+ p_tcp_ramrod->tcp.remote_port,
+ p_tcp_ramrod->tcp.vlan_id);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n",
+ p_tcp_ramrod->tcp.local_ip,
+ p_tcp_ramrod->tcp.local_port,
+ p_tcp_ramrod->tcp.remote_ip,
+ p_tcp_ramrod->tcp.remote_port,
+ p_tcp_ramrod->tcp.vlan_id);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n",
+ p_tcp_ramrod->tcp.flow_label,
+ p_tcp_ramrod->tcp.ttl,
+ p_tcp_ramrod->tcp.tos_or_tc,
+ p_tcp_ramrod->tcp.mss,
+ p_tcp_ramrod->tcp.rcv_wnd_scale,
+ p_tcp_ramrod->tcp.connect_mode,
+ p_tcp_ramrod->tcp.flags);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n",
+ p_tcp_ramrod->tcp.syn_ip_payload_length,
+ p_tcp_ramrod->tcp.syn_phy_addr_lo,
+ p_tcp_ramrod->tcp.syn_phy_addr_hi);
+}
+
+static int
+qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod;
+ struct tcp_offload_params_opt2 *tcp;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t async_output_phys;
+ dma_addr_t in_pdata_phys;
+ u16 physical_q;
+ u16 flags = 0;
+ u8 tcp_flags;
+ int rc;
+ int i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = ep->tcp_cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
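+ /* Passive-side offload is issued from the LL2 SYN Rx completion
+ * path (qed_iwarp_ll2_comp_syn_pkt) and cannot block on the ramrod
+ * completion, so it uses the callback mode; the active side blocks.
+ */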
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE)
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ else
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD,
+ PROTOCOLID_IWARP, &init_data);
+ if (rc)
+ return rc;
+
+ p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload;
+
+ in_pdata_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, in_pdata);
+ DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr,
+ in_pdata_phys);
+
+ p_tcp_ramrod->iwarp.incoming_ulp_buffer.len =
+ cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+
+ async_output_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, async_output);
+ DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf,
+ async_output_phys);
+
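+ /* Store the ep pointer in the ramrod so that async events for this
+ * connection can be mapped back to the endpoint.
+ */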
+ p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+ p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q);
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q);
+ p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev;
+
+ tcp = &p_tcp_ramrod->tcp;
+ qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi,
+ &tcp->remote_mac_addr_mid,
+ &tcp->remote_mac_addr_lo, ep->remote_mac_addr);
+ qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid,
+ &tcp->local_mac_addr_lo, ep->local_mac_addr);
+
+ tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan);
+
+ tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags;
+
+ SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN,
+ !!(tcp_flags & QED_IWARP_TS_EN));
+
+ SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN,
+ !!(tcp_flags & QED_IWARP_DA_EN));
+
+ tcp->flags = cpu_to_le16(flags);
+ tcp->ip_version = ep->cm_info.ip_version;
+
+ for (i = 0; i < 4; i++) {
+ tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]);
+ tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]);
+ }
+
+ tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port);
+ tcp->local_port = cpu_to_le16(ep->cm_info.local_port);
+ tcp->mss = cpu_to_le16(ep->mss);
+ tcp->flow_label = 0;
+ tcp->ttl = 0x40;
+ tcp->tos_or_tc = 0;
+
+ tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+ tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss);
+ tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+ tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT);
+ tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL);
+
+ tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
+ tcp->connect_mode = ep->connect_mode;
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ tcp->syn_ip_payload_length =
+ cpu_to_le16(ep->syn_ip_payload_length);
+ tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr);
+ tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr);
+ }
+
+ qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc);
+
+ return rc;
+}
+
+static void
+qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_cm_event_params params;
+ struct mpa_v2_hdr *mpa_v2;
+ union async_output *async_data;
+ u16 mpa_ord, mpa_ird;
+ u8 mpa_hdr_size = 0;
+ u16 ulp_data_len;
+ u8 mpa_rev;
+
+ async_data = &ep->ep_buffer_virt->async_output;
+
+ mpa_rev = async_data->mpa_request.mpa_handshake_mode;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "private_data_len=%x handshake_mode=%x private_data=(%x)\n",
+ async_data->mpa_request.ulp_data_len,
+ mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata)));
+
+ if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+ /* Read ord/ird values from private data buffer */
+ mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata;
+ mpa_hdr_size = sizeof(*mpa_v2);
+
+ mpa_ord = ntohs(mpa_v2->ord);
+ mpa_ird = ntohs(mpa_v2->ird);
+
+ /* Temporarily store the requested incoming ord/ird in cm_info; later
+ * they are replaced with the negotiated values during accept.
+ */
+ ep->cm_info.ord = (u8)min_t(u16,
+ (mpa_ord & MPA_V2_IRD_ORD_MASK),
+ QED_IWARP_ORD_DEFAULT);
+
+ ep->cm_info.ird = (u8)min_t(u16,
+ (mpa_ird & MPA_V2_IRD_ORD_MASK),
+ QED_IWARP_IRD_DEFAULT);
+
+ /* Peer2Peer negotiation */
+ ep->rtr_type = MPA_RTR_TYPE_NONE;
+ if (mpa_ird & MPA_V2_PEER2PEER_MODEL) {
+ if (mpa_ord & MPA_V2_WRITE_RTR)
+ ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE;
+
+ if (mpa_ord & MPA_V2_READ_RTR)
+ ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ;
+
+ if (mpa_ird & MPA_V2_SEND_RTR)
+ ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND;
+
+ ep->rtr_type &= iwarp_info->rtr_type;
+
+ /* If we're left with no match, send our capabilities */
+ if (ep->rtr_type == MPA_RTR_TYPE_NONE)
+ ep->rtr_type = iwarp_info->rtr_type;
+ }
+
+ ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
+ } else {
+ ep->cm_info.ord = QED_IWARP_ORD_DEFAULT;
+ ep->cm_info.ird = QED_IWARP_IRD_DEFAULT;
+ ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n",
+ mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type,
+ async_data->mpa_request.ulp_data_len, mpa_hdr_size);
+
+ /* Strip mpa v2 hdr from private data before sending to upper layer */
+ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size;
+
+ ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len);
+ ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size;
+
+ params.event = QED_IWARP_EVENT_MPA_REQUEST;
+ params.cm_info = &ep->cm_info;
+ params.ep_context = ep;
+ params.status = 0;
+
+ ep->state = QED_IWARP_EP_MPA_REQ_RCVD;
+ ep->event_cb(ep->cb_context, &params);
+}
+
+static int
+qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+ struct mpa_outgoing_params *common;
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_sp_init_data init_data;
+ dma_addr_t async_output_phys;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t out_pdata_phys;
+ dma_addr_t in_pdata_phys;
+ struct qed_rdma_qp *qp;
+ bool reject;
+ u32 val;
+ int rc;
+
+ if (!ep)
+ return -EINVAL;
+
+ qp = ep->qp;
+ reject = !qp;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = reject ? ep->tcp_cid : qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (ep->connect_mode == TCP_CONNECT_ACTIVE)
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ else
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
+ PROTOCOLID_IWARP, &init_data);
+ if (rc)
+ return rc;
+
+ p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload;
+ common = &p_mpa_ramrod->common;
+
+ out_pdata_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, out_pdata);
+ DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys);
+
+ val = ep->cm_info.private_data_len;
+ common->outgoing_ulp_buffer.len = cpu_to_le16(val);
+ common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed;
+
+ common->out_rq.ord = cpu_to_le32(ep->cm_info.ord);
+ common->out_rq.ird = cpu_to_le32(ep->cm_info.ird);
+
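+ /* Pack the opaque FID into the upper 16 bits and the TCP CID into
+ * the lower 16 bits of the ramrod's tcp_cid field.
+ */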
+ val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid;
+ p_mpa_ramrod->tcp_cid = cpu_to_le32(val);
+
+ in_pdata_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, in_pdata);
+ p_mpa_ramrod->tcp_connect_side = ep->connect_mode;
+ DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr,
+ in_pdata_phys);
+ p_mpa_ramrod->incoming_ulp_buffer.len =
+ cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata));
+ async_output_phys = ep->ep_buffer_phys +
+ offsetof(struct qed_iwarp_ep_memory, async_output);
+ DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf,
+ async_output_phys);
+ p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep));
+ p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep));
+
+ if (!reject) {
+ DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr,
+ qp->shared_queue_phys_addr);
+ p_mpa_ramrod->stats_counter_id =
+ RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue;
+ } else {
+ common->reject = 1;
+ }
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size);
+ p_mpa_ramrod->mode = ep->mpa_rev;
+ SET_FIELD(p_mpa_ramrod->rtr_pref,
+ IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
+
+ ep->state = QED_IWARP_EP_MPA_OFFLOADED;
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (!reject)
+ ep->cid = qp->icid; /* Now they're migrated. */
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n",
+ reject ? 0xffff : qp->icid,
+ ep->tcp_cid,
+ rc,
+ ep->cm_info.ird,
+ ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject);
+ return rc;
+}
+
+static void
+qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ ep->state = QED_IWARP_EP_INIT;
+ if (ep->qp)
+ ep->qp->ep = NULL;
+ ep->qp = NULL;
+ memset(&ep->cm_info, 0, sizeof(ep->cm_info));
+
+ if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+ /* We don't care about the return code; it's OK if tcp_cid
+ * remains invalid. In that case we'll defer the allocation.
+ */
+ qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+ }
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ list_move_tail(&ep->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+}
+
+static void
+qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct mpa_v2_hdr *mpa_v2_params;
+ union async_output *async_data;
+ u16 mpa_ird, mpa_ord;
+ u8 mpa_data_size = 0;
+ u16 ulp_data_len;
+
+ if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) {
+ mpa_v2_params =
+ (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata);
+ mpa_data_size = sizeof(*mpa_v2_params);
+ mpa_ird = ntohs(mpa_v2_params->ird);
+ mpa_ord = ntohs(mpa_v2_params->ord);
+
+ ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK);
+ ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK);
+ }
+
+ async_data = &ep->ep_buffer_virt->async_output;
+ ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size;
+
+ ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len);
+ ep->cm_info.private_data_len = ulp_data_len - mpa_data_size;
+}
+
+static void
+qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ DP_NOTICE(p_hwfn,
+ "MPA reply event not expected on passive side!\n");
+ return;
+ }
+
+ params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY;
+
+ qed_iwarp_parse_private_data(p_hwfn, ep);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+ ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+ params.cm_info = &ep->cm_info;
+ params.ep_context = ep;
+ params.status = 0;
+
+ ep->mpa_reply_processed = true;
+
+ ep->event_cb(ep->cb_context, &params);
+}
+
+#define QED_IWARP_CONNECT_MODE_STRING(ep) \
+ ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? "Passive" : "Active"
+
+/* Called as a result of the event:
+ * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE
+ */
+static void
+qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ if (ep->connect_mode == TCP_CONNECT_ACTIVE)
+ params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
+ else
+ params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE;
+
+ if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed)
+ qed_iwarp_parse_private_data(p_hwfn, ep);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n",
+ ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird);
+
+ params.cm_info = &ep->cm_info;
+
+ params.ep_context = ep;
+
+ switch (fw_return_code) {
+ case RDMA_RETURN_OK:
+ ep->qp->max_rd_atomic_req = ep->cm_info.ord;
+ ep->qp->max_rd_atomic_resp = ep->cm_info.ird;
+ qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1);
+ ep->state = QED_IWARP_EP_ESTABLISHED;
+ params.status = 0;
+ break;
+ case IWARP_CONN_ERROR_MPA_TIMEOUT:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -EBUSY;
+ break;
+ case IWARP_CONN_ERROR_MPA_ERROR_REJECT:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_RST:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid,
+ ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ case IWARP_CONN_ERROR_MPA_FIN:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_INSUF_IRD:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_RTR_MISMATCH:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_LOCAL_ERROR:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_TERMINATE:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid);
+ params.status = -ECONNREFUSED;
+ break;
+ default:
+ params.status = -ECONNRESET;
+ break;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK)
+ /* paired with READ_ONCE in destroy_qp */
+ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
+
+ ep->event_cb(ep->cb_context, &params);
+
+ /* On the passive side, if there is no associated QP (i.e. the
+ * connection was rejected) the ep must be returned to the pool; in
+ * the regular case an element is added back in accept instead.
+ * In both cases the ep must be removed from the ep_list.
+ */
+ if (fw_return_code != RDMA_RETURN_OK) {
+ ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+ if ((ep->connect_mode == TCP_CONNECT_PASSIVE) &&
+ (!ep->qp)) { /* Rejected */
+ qed_iwarp_return_ep(p_hwfn, ep);
+ } else {
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+ }
+}
+
+static void
+qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 *mpa_data_size)
+{
+ struct mpa_v2_hdr *mpa_v2_params;
+ u16 mpa_ird, mpa_ord;
+
+ *mpa_data_size = 0;
+ if (MPA_REV2(ep->mpa_rev)) {
+ mpa_v2_params =
+ (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata;
+ *mpa_data_size = sizeof(*mpa_v2_params);
+
+ mpa_ird = (u16)ep->cm_info.ird;
+ mpa_ord = (u16)ep->cm_info.ord;
+
+ if (ep->rtr_type != MPA_RTR_TYPE_NONE) {
+ mpa_ird |= MPA_V2_PEER2PEER_MODEL;
+
+ if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND)
+ mpa_ird |= MPA_V2_SEND_RTR;
+
+ if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE)
+ mpa_ord |= MPA_V2_WRITE_RTR;
+
+ if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ)
+ mpa_ord |= MPA_V2_READ_RTR;
+ }
+
+ mpa_v2_params->ird = htons(mpa_ird);
+ mpa_v2_params->ord = htons(mpa_ord);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n",
+ mpa_v2_params->ird,
+ mpa_v2_params->ord,
+ *((u32 *)mpa_v2_params),
+ mpa_ord & MPA_V2_IRD_ORD_MASK,
+ mpa_ird & MPA_V2_IRD_ORD_MASK,
+ !!(mpa_ird & MPA_V2_PEER2PEER_MODEL),
+ !!(mpa_ird & MPA_V2_SEND_RTR),
+ !!(mpa_ord & MPA_V2_WRITE_RTR),
+ !!(mpa_ord & MPA_V2_READ_RTR));
+ }
+}
+
+int qed_iwarp_connect(void *rdma_cxt,
+ struct qed_iwarp_connect_in *iparams,
+ struct qed_iwarp_connect_out *oparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_iwarp_ep *ep;
+ u8 mpa_data_size = 0;
+ u32 cid;
+ int rc;
+
+ if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) ||
+ (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) {
+ DP_NOTICE(p_hwfn,
+ "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+ iparams->qp->icid, iparams->cm_info.ord,
+ iparams->cm_info.ird);
+
+ return -EINVAL;
+ }
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+ /* Allocate ep object */
+ rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+ if (rc)
+ return rc;
+
+ rc = qed_iwarp_create_ep(p_hwfn, &ep);
+ if (rc)
+ goto err;
+
+ ep->tcp_cid = cid;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ep->qp = iparams->qp;
+ ep->qp->ep = ep;
+ ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr);
+ ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr);
+ memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info));
+
+ ep->cm_info.ord = iparams->cm_info.ord;
+ ep->cm_info.ird = iparams->cm_info.ird;
+
+ ep->rtr_type = iwarp_info->rtr_type;
+ if (!iwarp_info->peer2peer)
+ ep->rtr_type = MPA_RTR_TYPE_NONE;
+
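+ /* A zero-length RDMA Read RTR requires at least one outstanding
+ * read, so make sure ord is at least 1.
+ */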
+ if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0))
+ ep->cm_info.ord = 1;
+
+ ep->mpa_rev = iwarp_info->mpa_rev;
+
+ qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+ ep->cm_info.private_data_len = iparams->cm_info.private_data_len +
+ mpa_data_size;
+
+ memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+ iparams->cm_info.private_data,
+ iparams->cm_info.private_data_len);
+
+ ep->mss = iparams->mss;
+ ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+ ep->event_cb = iparams->event_cb;
+ ep->cb_context = iparams->cb_context;
+ ep->connect_mode = TCP_CONNECT_ACTIVE;
+
+ oparams->ep_context = ep;
+
+ rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n",
+ iparams->qp->icid, ep->tcp_cid, rc);
+
+ if (rc) {
+ qed_iwarp_destroy_ep(p_hwfn, ep, true);
+ goto err;
+ }
+
+ return rc;
+err:
+ qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+ return rc;
+}
+
+static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_ep *ep = NULL;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+ DP_ERR(p_hwfn, "Ep list is empty\n");
+ goto out;
+ }
+
+ ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+ struct qed_iwarp_ep, list_entry);
+
+ /* In some cases the tcp cid allocation failed when the ep was added
+ * back from accept or a failure path; retry now. This is not the
+ * common case.
+ */
+ if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) {
+ rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid);
+
+ /* If this fails we could look for another entry with a valid
+ * tcp_cid, but since we don't expect to reach this point anyway,
+ * it's not worth the extra handling.
+ */
+ if (rc) {
+ ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+ ep = NULL;
+ goto out;
+ }
+ }
+
+ list_del(&ep->list_entry);
+
+out:
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ return ep;
+}
+
+#define QED_IWARP_MAX_CID_CLEAN_TIME 100
+#define QED_IWARP_MAX_NO_PROGRESS_CNT 5
+
+/* Wait for all the bits of a bmap to be cleared. As long as there is
+ * progress (i.e. the number of bits left to be cleared decreases), the
+ * function keeps waiting.
+ */
+static int
+qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
+{
+ int prev_weight = 0;
+ int wait_count = 0;
+ int weight = 0;
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ prev_weight = weight;
+
+ while (weight) {
+ /* If the HW device is in recovery, all resources are immediately
+ * reset without receiving a per-cid indication from HW. In this
+ * case we don't expect the cid_map to be cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
+ msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+
+ if (prev_weight == weight) {
+ wait_count++;
+ } else {
+ prev_weight = weight;
+ wait_count = 0;
+ }
+
+ if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) {
+ DP_NOTICE(p_hwfn,
+ "%s bitmap wait timed out (%d cids pending)\n",
+ bmap->name, weight);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+ int i;
+
+ rc = qed_iwarp_wait_cid_map_cleared(p_hwfn,
+ &p_hwfn->p_rdma_info->tcp_cid_map);
+ if (rc)
+ return rc;
+
+ /* Now free the tcp cids from the main cid map */
+ for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++)
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i);
+
+ /* Now wait for all cids to be completed */
+ return qed_iwarp_wait_cid_map_cleared(p_hwfn,
+ &p_hwfn->p_rdma_info->cid_map);
+}
+
+static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_ep *ep;
+
+ while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) {
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list,
+ struct qed_iwarp_ep, list_entry);
+
+ if (!ep) {
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ break;
+ }
+ list_del(&ep->list_entry);
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID)
+ qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid);
+
+ qed_iwarp_destroy_ep(p_hwfn, ep, false);
+ }
+}
+
+static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init)
+{
+ struct qed_iwarp_ep *ep;
+ int rc = 0;
+ int count;
+ u32 cid;
+ int i;
+
+ count = init ? QED_IWARP_PREALLOC_CNT : 1;
+ for (i = 0; i < count; i++) {
+ rc = qed_iwarp_create_ep(p_hwfn, &ep);
+ if (rc)
+ return rc;
+
+ /* During initialization we allocate from the main pool;
+ * afterwards we allocate only from the tcp_cid map.
+ */
+ if (init) {
+ rc = qed_iwarp_alloc_cid(p_hwfn, &cid);
+ if (rc)
+ goto err;
+ qed_iwarp_set_tcp_cid(p_hwfn, cid);
+ } else {
+ /* We don't care about the return code; it's OK if
+ * tcp_cid remains invalid. In that case we'll defer
+ * the allocation.
+ */
+ qed_iwarp_alloc_tcp_cid(p_hwfn, &cid);
+ }
+
+ ep->tcp_cid = cid;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&ep->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.ep_free_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+
+ return rc;
+
+err:
+ qed_iwarp_destroy_ep(p_hwfn, ep, false);
+
+ return rc;
+}
+
+int qed_iwarp_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ /* Allocate a bitmap for tcp cids. These are used by the passive side
+ * so that, during a DPC, it can take a pre-acquired tcp cid that does
+ * not require dynamic ILT allocation.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map,
+ QED_IWARP_PREALLOC_CNT, "TCP_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate tcp cid, rc = %d\n", rc);
+ return rc;
+ }
+
+ INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list);
+ spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ rc = qed_iwarp_prealloc_ep(p_hwfn, true);
+ if (rc)
+ return rc;
+
+ return qed_ooo_alloc(p_hwfn);
+}
+
+void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+ qed_ooo_free(p_hwfn);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1);
+ kfree(iwarp_info->mpa_bufs);
+ kfree(iwarp_info->partial_fpdus);
+ kfree(iwarp_info->mpa_intermediate_buf);
+}
+
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_ep *ep;
+ u8 mpa_data_size = 0;
+ int rc;
+
+ ep = iparams->ep_context;
+ if (!ep) {
+ DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+ iparams->qp->icid, ep->tcp_cid);
+
+ if ((iparams->ord > QED_IWARP_ORD_DEFAULT) ||
+ (iparams->ird > QED_IWARP_IRD_DEFAULT)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n",
+ iparams->qp->icid,
+ ep->tcp_cid, iparams->ord, iparams->ird);
+ return -EINVAL;
+ }
+
+ qed_iwarp_prealloc_ep(p_hwfn, false);
+
+ ep->cb_context = iparams->cb_context;
+ ep->qp = iparams->qp;
+ ep->qp->ep = ep;
+
+ if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) {
+ /* Negotiate ord/ird: if the upper layer requested an ord larger
+ * than the ird advertised by the remote peer, decrease our ord.
+ */
+ if (iparams->ord > ep->cm_info.ird)
+ iparams->ord = ep->cm_info.ird;
+
+ if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) &&
+ (iparams->ird == 0))
+ iparams->ird = 1;
+ }
+
+ /* Update cm_info ord/ird to be negotiated values */
+ ep->cm_info.ord = iparams->ord;
+ ep->cm_info.ird = iparams->ird;
+
+ qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+ ep->cm_info.private_data_len = iparams->private_data_len +
+ mpa_data_size;
+
+ memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+ iparams->private_data, iparams->private_data_len);
+
+ rc = qed_iwarp_mpa_offload(p_hwfn, ep);
+ if (rc)
+ qed_iwarp_modify_qp(p_hwfn,
+ iparams->qp, QED_IWARP_QP_STATE_ERROR, 1);
+
+ return rc;
+}
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_ep *ep;
+ u8 mpa_data_size = 0;
+
+ ep = iparams->ep_context;
+ if (!ep) {
+ DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid);
+
+ ep->cb_context = iparams->cb_context;
+ ep->qp = NULL;
+
+ qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size);
+
+ ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata;
+ ep->cm_info.private_data_len = iparams->private_data_len +
+ mpa_data_size;
+
+ memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size,
+ iparams->private_data, iparams->private_data_len);
+
+ return qed_iwarp_mpa_offload(p_hwfn, ep);
+}
+
+static void
+qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_cm_info *cm_info)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n",
+ cm_info->ip_version);
+
+ if (cm_info->ip_version == QED_TCP_IPV4)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n",
+ cm_info->remote_ip, cm_info->remote_port,
+ cm_info->local_ip, cm_info->local_port,
+ cm_info->vlan);
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n",
+ cm_info->remote_ip, cm_info->remote_port,
+ cm_info->local_ip, cm_info->local_port,
+ cm_info->vlan);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "private_data_len = %x ord = %d, ird = %d\n",
+ cm_info->private_data_len, cm_info->ord, cm_info->ird);
+}
+
+static int
+qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ll2_buff *buf, u8 handle)
+{
+ int rc;
+
+ rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr,
+ (u16)buf->buff_size, buf, 1);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n",
+ rc, handle);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size,
+ buf->data, buf->data_phys_addr);
+ kfree(buf);
+ }
+
+ return rc;
+}
+
+static bool
+qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info)
+{
+ struct qed_iwarp_ep *ep = NULL;
+ bool found = false;
+
+ list_for_each_entry(ep,
+ &p_hwfn->p_rdma_info->iwarp.ep_list,
+ list_entry) {
+ if ((ep->cm_info.local_port == cm_info->local_port) &&
+ (ep->cm_info.remote_port == cm_info->remote_port) &&
+ (ep->cm_info.vlan == cm_info->vlan) &&
+ !memcmp(&ep->cm_info.local_ip, cm_info->local_ip,
+ sizeof(cm_info->local_ip)) &&
+ !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip,
+ sizeof(cm_info->remote_ip))) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ DP_NOTICE(p_hwfn,
+ "SYN received on active connection - dropping\n");
+ qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+ return true;
+ }
+
+ return false;
+}
+
+static struct qed_iwarp_listener *
+qed_iwarp_get_listener(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_cm_info *cm_info)
+{
+ struct qed_iwarp_listener *listener = NULL;
+ static const u32 ip_zero[4] = { 0, 0, 0, 0 };
+ bool found = false;
+
+ list_for_each_entry(listener,
+ &p_hwfn->p_rdma_info->iwarp.listen_list,
+ list_entry) {
+ if (listener->port == cm_info->local_port) {
+ if (!memcmp(listener->ip_addr,
+ ip_zero, sizeof(ip_zero))) {
+ found = true;
+ break;
+ }
+
+ if (!memcmp(listener->ip_addr,
+ cm_info->local_ip,
+ sizeof(cm_info->local_ip)) &&
+ (listener->vlan == cm_info->vlan)) {
+ found = true;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n",
+ listener);
+ return listener;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n");
+ return NULL;
+}
+
+static int
+qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_cm_info *cm_info,
+ void *buf,
+ u8 *remote_mac_addr,
+ u8 *local_mac_addr,
+ int *payload_len, int *tcp_start_offset)
+{
+ struct vlan_ethhdr *vethh;
+ bool vlan_valid = false;
+ struct ipv6hdr *ip6h;
+ struct ethhdr *ethh;
+ struct tcphdr *tcph;
+ struct iphdr *iph;
+ int eth_hlen;
+ int ip_hlen;
+ int eth_type;
+ int i;
+
+ ethh = buf;
+ eth_type = ntohs(ethh->h_proto);
+ if (eth_type == ETH_P_8021Q) {
+ vlan_valid = true;
+ vethh = (struct vlan_ethhdr *)ethh;
+ cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK;
+ eth_type = ntohs(vethh->h_vlan_encapsulated_proto);
+ }
+
+ eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0);
+
+ if (!ether_addr_equal(ethh->h_dest,
+ p_hwfn->p_rdma_info->iwarp.mac_addr)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "Got unexpected mac %pM instead of %pM\n",
+ ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(remote_mac_addr, ethh->h_source);
+ ether_addr_copy(local_mac_addr, ethh->h_dest);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n",
+ eth_type, ethh->h_source);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n",
+ eth_hlen, ethh->h_dest);
+
+ iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
+
+ if (eth_type == ETH_P_IP) {
+ if (iph->protocol != IPPROTO_TCP) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected ip protocol on ll2 %x\n",
+ iph->protocol);
+ return -EINVAL;
+ }
+
+ cm_info->local_ip[0] = ntohl(iph->daddr);
+ cm_info->remote_ip[0] = ntohl(iph->saddr);
+ cm_info->ip_version = QED_TCP_IPV4;
+
+ ip_hlen = (iph->ihl) * sizeof(u32);
+ *payload_len = ntohs(iph->tot_len) - ip_hlen;
+ } else if (eth_type == ETH_P_IPV6) {
+ ip6h = (struct ipv6hdr *)iph;
+
+ if (ip6h->nexthdr != IPPROTO_TCP) {
+ DP_NOTICE(p_hwfn,
+ "Unexpected ip protocol on ll2 %x\n",
+ ip6h->nexthdr);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4; i++) {
+ cm_info->local_ip[i] =
+ ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
+ cm_info->remote_ip[i] =
+ ntohl(ip6h->saddr.in6_u.u6_addr32[i]);
+ }
+ cm_info->ip_version = QED_TCP_IPV6;
+
+ ip_hlen = sizeof(*ip6h);
+ *payload_len = ntohs(ip6h->payload_len);
+ } else {
+ DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type);
+ return -EINVAL;
+ }
+
+ tcph = (struct tcphdr *)((u8 *)iph + ip_hlen);
+
+ if (!tcph->syn) {
+ DP_NOTICE(p_hwfn,
+ "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n",
+ iph->ihl, tcph->source, tcph->dest);
+ return -EINVAL;
+ }
+
+ cm_info->local_port = ntohs(tcph->dest);
+ cm_info->remote_port = ntohs(tcph->source);
+
+ qed_iwarp_print_cm_info(p_hwfn, cm_info);
+
+ *tcp_start_offset = eth_hlen + ip_hlen;
+
+ return 0;
+}
+
+static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn,
+ u16 cid)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_fpdu *partial_fpdu;
+ u32 idx;
+
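+ /* partial_fpdus is indexed by the cid's offset within the iWARP
+ * protocol cid range.
+ */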
+ idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP);
+ if (idx >= iwarp_info->max_num_partial_fpdus) {
+ DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid,
+ iwarp_info->max_num_partial_fpdus);
+ return NULL;
+ }
+
+ partial_fpdu = &iwarp_info->partial_fpdus[idx];
+
+ return partial_fpdu;
+}
+
+enum qed_iwarp_mpa_pkt_type {
+ QED_IWARP_MPA_PKT_PACKED,
+ QED_IWARP_MPA_PKT_PARTIAL,
+ QED_IWARP_MPA_PKT_UNALIGNED
+};
+
+#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff
+#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2)
+#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4)
+
+/* Pad to multiple of 4 */
+#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4)
+#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \
+ (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \
+ QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \
+ QED_IWARP_MPA_CRC32_DIGEST_SIZE)
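+/* Example: mpa_len = 13 -> ALIGN(13 + 2, 4) + 4 = 16 + 4 = 20 bytes. */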
+
+/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */
+#define QED_IWARP_MAX_BDS_PER_FPDU 3
+
+static const char * const pkt_type_str[] = {
+ "QED_IWARP_MPA_PKT_PACKED",
+ "QED_IWARP_MPA_PKT_PARTIAL",
+ "QED_IWARP_MPA_PKT_UNALIGNED"
+};
+
+static int
+qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct qed_iwarp_ll2_buff *buf);
+
+static enum qed_iwarp_mpa_pkt_type
+qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ u16 tcp_payload_len, u8 *mpa_data)
+{
+ enum qed_iwarp_mpa_pkt_type pkt_type;
+ u16 mpa_len;
+
+ if (fpdu->incomplete_bytes) {
+ pkt_type = QED_IWARP_MPA_PKT_UNALIGNED;
+ goto out;
+ }
+
+ /* Special case of a single byte remaining: the lower byte of the
+ * MPA length will arrive in the next packet.
+ */
+ if (tcp_payload_len == 1) {
+ fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE;
+ pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
+ goto out;
+ }
+
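+ /* The MPA length is carried in the first two bytes of the FPDU,
+ * in network byte order.
+ */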
+ mpa_len = ntohs(*(__force __be16 *)mpa_data);
+ fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
+
+ if (fpdu->fpdu_length <= tcp_payload_len)
+ pkt_type = QED_IWARP_MPA_PKT_PACKED;
+ else
+ pkt_type = QED_IWARP_MPA_PKT_PARTIAL;
+
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n",
+ pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len);
+
+ return pkt_type;
+}
+
+static void
+qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *pkt_data,
+ u16 tcp_payload_size, u8 placement_offset)
+{
+ u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
+
+ fpdu->mpa_buf = buf;
+ fpdu->pkt_hdr = buf->data_phys_addr + placement_offset;
+ fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset;
+ fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset;
+ fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset;
+
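+ /* A single received byte means only the upper byte of the MPA length
+ * is known; mark incomplete_bytes invalid until the lower byte arrives
+ * (see qed_iwarp_update_fpdu_length()).
+ */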
+ if (tcp_payload_size == 1)
+ fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH;
+ else if (tcp_payload_size < fpdu->fpdu_length)
+ fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size;
+ else
+ fpdu->incomplete_bytes = 0; /* complete fpdu */
+
+ fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes;
+}
+
+static int
+qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *pkt_data,
+ struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size)
+{
+ u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset);
+ u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf;
+ int rc;
+
+ /* The data from the partial packet stored in the fpdu needs to be
+ * copied to the new buf, which also requires moving the data already
+ * placed in that buf. The buffer is assumed to be big enough since
+ * fpdu_length <= mss. An intermediate buffer is used because the new
+ * data may need to be copied to an overlapping location.
+ */
+ if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) {
+ DP_ERR(p_hwfn,
+ "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
+ buf->buff_size, fpdu->mpa_frag_len,
+ tcp_payload_size, fpdu->incomplete_bytes);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n",
+ fpdu->mpa_frag_virt, fpdu->mpa_frag_len,
+ (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
+
+ memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len);
+ memcpy(tmp_buf + fpdu->mpa_frag_len,
+ (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size);
+
+ rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf);
+ if (rc)
+ return rc;
+
+ /* The buffer was posted successfully, so copy the data to the new
+ * buffer; otherwise this would have happened in the next round.
+ */
+ memcpy((u8 *)(buf->data), tmp_buf,
+ fpdu->mpa_frag_len + tcp_payload_size);
+
+ fpdu->mpa_buf = buf;
+ /* fpdu->pkt_hdr remains as is */
+ /* fpdu->mpa_frag is overridden with new buf */
+ fpdu->mpa_frag = buf->data_phys_addr;
+ fpdu->mpa_frag_virt = buf->data;
+ fpdu->mpa_frag_len += tcp_payload_size;
+
+ fpdu->incomplete_bytes -= tcp_payload_size;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n",
+ buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size,
+ fpdu->incomplete_bytes);
+
+ return 0;
+}
+
+static void
+qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu, u8 *mpa_data)
+{
+ u16 mpa_len;
+
+ /* Update incomplete packets if needed */
+ if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) {
+ /* Missing lower byte is now available */
+ mpa_len = fpdu->fpdu_length | *mpa_data;
+ fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
+ /* one byte of hdr */
+ fpdu->mpa_frag_len = 1;
+ fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n",
+ mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes);
+ }
+}
+
+#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \
+ (GET_FIELD((_curr_pkt)->flags, \
+ UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE))
+
+/* Recycle a buffer using the ll2 drop option. This mechanism ensures that
+ * all buffers posted to tx before this one have completed. The buffer sent
+ * here is passed as a cookie to the tx completion function and can then be
+ * reposted to the rx chain once done. The flow that requires this is an FPDU
+ * that splits over more than 3 tcp segments: the driver needs to re-post an
+ * rx buffer instead of the one received, but it can't simply repost the
+ * buffer it copied from, since that buffer may originally have held a packed
+ * FPDU that is still partially posted to the FW. The driver must ensure the
+ * FW is done with it first.
+ */
+static int
+qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct qed_iwarp_ll2_buff *buf)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ buf->piggy_buf = NULL;
+ tx_pkt.cookie = buf;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't drop packet rc=%d\n", rc);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n",
+ (unsigned long int)tx_pkt.first_frag,
+ tx_pkt.first_frag_len, buf, rc);
+
+ return rc;
+}
+
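+/* Send only the packet header (no payload) on the aligned right-trimmed Tx
+ * queue. This is used when a partial FPDU ends at the TCP window's right
+ * edge, so the FW still sees the headers without any FPDU payload.
+ */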
+static int
+qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2;
+
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ tx_pkt.enable_ip_cksum = true;
+ tx_pkt.enable_l4_cksum = true;
+ tx_pkt.calc_ip_len = true;
+ /* vlan overload with enum iwarp_ll2_tx_queues */
+ tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send right edge rc=%d\n", rc);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n",
+ tx_pkt.num_of_bds,
+ (unsigned long int)tx_pkt.first_frag,
+ tx_pkt.first_frag_len, rc);
+
+ return rc;
+}
+
+static int
+qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_fpdu *fpdu,
+ struct unaligned_opaque_data *curr_pkt,
+ struct qed_iwarp_ll2_buff *buf,
+ u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ u16 first_mpa_offset;
+ u8 ll2_handle;
+ int rc;
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+
+ /* An unaligned packet means it is split over two tcp segments, so the
+ * complete packet requires 3 bds: one for the header, one for the part
+ * of the fpdu in the first tcp segment, and a last fragment pointing
+ * to the remainder of the fpdu. A packed pdu requires only two bds:
+ * one for the header and one for the data.
+ */
+ tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */
+
+ /* Send the mpa_buf only with the last fpdu (in case of packed) */
+ if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED ||
+ tcp_payload_size <= fpdu->fpdu_length)
+ tx_pkt.cookie = fpdu->mpa_buf;
+
+ tx_pkt.first_frag = fpdu->pkt_hdr;
+ tx_pkt.first_frag_len = fpdu->pkt_hdr_size;
+ tx_pkt.enable_ip_cksum = true;
+ tx_pkt.enable_l4_cksum = true;
+ tx_pkt.calc_ip_len = true;
+ /* vlan overload with enum iwarp_ll2_tx_queues */
+ tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE;
+
+ /* Special case of an unaligned, non-packed packet: both buffers must
+ * be sent as the cookie so that both get released.
+ */
+ if (tcp_payload_size == fpdu->incomplete_bytes)
+ fpdu->mpa_buf->piggy_buf = buf;
+
+ ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle;
+
+ /* Set first fragment to header */
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true);
+ if (rc)
+ goto out;
+
+ /* Set second fragment to first part of packet */
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle,
+ fpdu->mpa_frag,
+ fpdu->mpa_frag_len);
+ if (rc)
+ goto out;
+
+ if (!fpdu->incomplete_bytes)
+ goto out;
+
+ first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
+
+ /* Set third fragment to second part of the packet */
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
+ ll2_handle,
+ buf->data_phys_addr +
+ first_mpa_offset,
+ fpdu->incomplete_bytes);
+out:
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n",
+ tx_pkt.num_of_bds,
+ tx_pkt.first_frag_len,
+ fpdu->mpa_frag_len,
+ fpdu->incomplete_bytes, rc);
+
+ return rc;
+}
+
+static void
+qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn,
+ struct unaligned_opaque_data *curr_pkt,
+ u32 opaque_data0, u32 opaque_data1)
+{
+ u64 opaque_data;
+
+ opaque_data = HILO_64(cpu_to_le32(opaque_data1),
+ cpu_to_le32(opaque_data0));
+ *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data);
+
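+ /* Adjust first_mpa_offset by the TCP payload offset (the packet
+ * header length) so it points into the packet rather than into the
+ * TCP payload; the Rx completion handler later adds the buffer
+ * placement offset as well.
+ */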
+ le16_add_cpu(&curr_pkt->first_mpa_offset,
+ curr_pkt->tcp_payload_offset);
+}
+
+/* Called when an unaligned or incomplete MPA packet arrives. The driver
+ * needs to align the packet, possibly using previously received data, and
+ * send it down to the FW once it is aligned.
+ */
+static int
+qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf)
+{
+ struct unaligned_opaque_data *curr_pkt = &mpa_buf->data;
+ struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf;
+ enum qed_iwarp_mpa_pkt_type pkt_type;
+ struct qed_iwarp_fpdu *fpdu;
+ u16 cid, first_mpa_offset;
+ int rc = -EINVAL;
+ u8 *mpa_data;
+
+ cid = le32_to_cpu(curr_pkt->cid);
+
+ fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
+ if (!fpdu) { /* something corrupt with cid, post rx back */
+ DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n",
+ cid);
+ goto err;
+ }
+
+ do {
+ first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset);
+ mpa_data = ((u8 *)(buf->data) + first_mpa_offset);
+
+ pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu,
+ mpa_buf->tcp_payload_len,
+ mpa_data);
+
+ switch (pkt_type) {
+ case QED_IWARP_MPA_PKT_PARTIAL:
+ qed_iwarp_init_fpdu(buf, fpdu,
+ curr_pkt,
+ mpa_buf->tcp_payload_len,
+ mpa_buf->placement_offset);
+
+ if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ }
+
+ rc = qed_iwarp_win_right_edge(p_hwfn, fpdu);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:reset rc=%d\n", rc);
+ memset(fpdu, 0, sizeof(*fpdu));
+ break;
+ }
+
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ case QED_IWARP_MPA_PKT_PACKED:
+ qed_iwarp_init_fpdu(buf, fpdu,
+ curr_pkt,
+ mpa_buf->tcp_payload_len,
+ mpa_buf->placement_offset);
+
+ rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
+ mpa_buf->tcp_payload_len,
+ pkt_type);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:reset rc=%d\n", rc);
+ memset(fpdu, 0, sizeof(*fpdu));
+ break;
+ }
+
+ mpa_buf->tcp_payload_len -= fpdu->fpdu_length;
+ le16_add_cpu(&curr_pkt->first_mpa_offset,
+ fpdu->fpdu_length);
+ break;
+ case QED_IWARP_MPA_PKT_UNALIGNED:
+ qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data);
+ if (mpa_buf->tcp_payload_len < fpdu->incomplete_bytes) {
+ /* special handling of fpdu split over more
+ * than 2 segments
+ */
+ if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) {
+ rc = qed_iwarp_win_right_edge(p_hwfn,
+ fpdu);
+ /* packet will be re-processed later */
+ if (rc)
+ return rc;
+ }
+
+ rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt,
+ buf,
+ mpa_buf->tcp_payload_len);
+ if (rc) /* packet will be re-processed later */
+ return rc;
+
+ mpa_buf->tcp_payload_len = 0;
+ break;
+ }
+
+ rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf,
+ mpa_buf->tcp_payload_len,
+ pkt_type);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Can't send FPDU:delay rc=%d\n", rc);
+ /* don't reset fpdu -> we need it for next
+ * classify
+ */
+ break;
+ }
+
+ mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes;
+ le16_add_cpu(&curr_pkt->first_mpa_offset,
+ fpdu->incomplete_bytes);
+
+ /* The framed PDU was sent - no more incomplete bytes */
+ fpdu->incomplete_bytes = 0;
+ break;
+ }
+ } while (mpa_buf->tcp_payload_len && !rc);
+
+ return rc;
+
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn,
+ buf,
+ p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle);
+ return rc;
+}
+
+static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL;
+ int rc;
+
+ while (!list_empty(&iwarp_info->mpa_buf_pending_list)) {
+ mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list,
+ struct qed_iwarp_ll2_mpa_buf,
+ list_entry);
+
+ rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf);
+
+ /* -EBUSY means break and continue processing later; don't
+ * remove the buf from the pending list.
+ */
+ if (rc == -EBUSY)
+ break;
+
+ list_move_tail(&mpa_buf->list_entry,
+ &iwarp_info->mpa_buf_list);
+
+ if (rc) { /* different error, don't continue */
+ DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc);
+ break;
+ }
+ }
+}
+
+static void
+qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_iwarp_ll2_mpa_buf *mpa_buf;
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_hwfn *p_hwfn = cxt;
+ u16 first_mpa_offset;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list,
+ struct qed_iwarp_ll2_mpa_buf, list_entry);
+ if (!mpa_buf) {
+ DP_ERR(p_hwfn, "No free mpa buf\n");
+ goto err;
+ }
+
+ list_del(&mpa_buf->list_entry);
+ qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data,
+ data->opaque_data_0, data->opaque_data_1);
+
+ first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n",
+ data->length.packet_length, first_mpa_offset,
+ mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags,
+ mpa_buf->data.cid);
+
+ mpa_buf->ll2_buf = data->cookie;
+ mpa_buf->tcp_payload_len = data->length.packet_length -
+ first_mpa_offset;
+
+ first_mpa_offset += data->u.placement_offset;
+ mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset);
+ mpa_buf->placement_offset = data->u.placement_offset;
+
+ list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list);
+
+ qed_iwarp_process_pending_pkts(p_hwfn);
+ return;
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn, data->cookie,
+ iwarp_info->ll2_mpa_handle);
+}
+
+static void
+qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_iwarp_ll2_buff *buf = data->cookie;
+ struct qed_iwarp_listener *listener;
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ struct qed_iwarp_cm_info cm_info;
+ struct qed_hwfn *p_hwfn = cxt;
+ u8 remote_mac_addr[ETH_ALEN];
+ u8 local_mac_addr[ETH_ALEN];
+ struct qed_iwarp_ep *ep;
+ int tcp_start_offset;
+ u8 ll2_syn_handle;
+ int payload_len;
+ u32 hdr_size;
+ int rc;
+
+ memset(&cm_info, 0, sizeof(cm_info));
+ ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
+
+ /* Check if packet was received with errors... */
+ if (data->err_flags) {
+ DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n",
+ data->err_flags);
+ goto err;
+ }
+
+ if (GET_FIELD(data->parse_flags,
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) &&
+ GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) {
+ DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n");
+ goto err;
+ }
+
+ rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) +
+ data->u.placement_offset, remote_mac_addr,
+ local_mac_addr, &payload_len,
+ &tcp_start_offset);
+ if (rc)
+ goto err;
+
+ /* Check if there is a listener for this 4-tuple+vlan */
+ listener = qed_iwarp_get_listener(p_hwfn, &cm_info);
+ if (!listener) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "SYN received on tuple not listened on parse_flags=%d packet len=%d\n",
+ data->parse_flags, data->length.packet_length);
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2;
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ tx_pkt.first_frag = buf->data_phys_addr +
+ data->u.placement_offset;
+ tx_pkt.first_frag_len = data->length.packet_length;
+ tx_pkt.cookie = buf;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle,
+ &tx_pkt, true);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Can't post SYN back to chip rc=%d\n", rc);
+ goto err;
+ }
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n");
+ /* There may already be an open ep on this connection if this is a
+ * SYN retransmit; make sure there isn't one.
+ */
+ if (qed_iwarp_ep_exists(p_hwfn, &cm_info))
+ goto err;
+
+ ep = qed_iwarp_get_free_ep(p_hwfn);
+ if (!ep)
+ goto err;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ether_addr_copy(ep->remote_mac_addr, remote_mac_addr);
+ ether_addr_copy(ep->local_mac_addr, local_mac_addr);
+
+ memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));
+
+ hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
+ ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
+ ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
+
+ ep->event_cb = listener->event_cb;
+ ep->cb_context = listener->cb_context;
+ ep->connect_mode = TCP_CONNECT_PASSIVE;
+
+ ep->syn = buf;
+ ep->syn_ip_payload_length = (u16)payload_len;
+ ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset +
+ tcp_start_offset;
+
+ rc = qed_iwarp_tcp_offload(p_hwfn, ep);
+ if (rc) {
+ qed_iwarp_return_ep(p_hwfn, ep);
+ goto err;
+ }
+
+ return;
+err:
+ qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle);
+}
+
+static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+ buffer->data, buffer->data_phys_addr);
+ kfree(buffer);
+}
+
+static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_iwarp_ll2_buff *piggy;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ if (!buffer) /* can happen in the packed unaligned MPA case */
+ return;
+
+ /* this was originally an rx packet, post it back */
+ piggy = buffer->piggy_buf;
+ if (piggy) {
+ buffer->piggy_buf = NULL;
+ qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle);
+ }
+
+ qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle);
+
+ if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle)
+ qed_iwarp_process_pending_pkts(p_hwfn);
+}
+
+static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle,
+ void *cookie, dma_addr_t first_frag_addr,
+ bool b_last_fragment, bool b_last_packet)
+{
+ struct qed_iwarp_ll2_buff *buffer = cookie;
+ struct qed_hwfn *p_hwfn = cxt;
+
+ if (!buffer)
+ return;
+
+ if (buffer->piggy_buf) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ buffer->piggy_buf->buff_size,
+ buffer->piggy_buf->data,
+ buffer->piggy_buf->data_phys_addr);
+
+ kfree(buffer->piggy_buf);
+ }
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size,
+ buffer->data, buffer->data_phys_addr);
+
+ kfree(buffer);
+}
+
+/* The only slowpath for iwarp ll2 is unalign flush. When this completion
+ * is received, need to reset the FPDU.
+ */
+static void
+qed_iwarp_ll2_slowpath(void *cxt,
+ u8 connection_handle,
+ u32 opaque_data_0, u32 opaque_data_1)
+{
+ struct unaligned_opaque_data unalign_data;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_iwarp_fpdu *fpdu;
+ u32 cid;
+
+ qed_iwarp_mpa_get_data(p_hwfn, &unalign_data,
+ opaque_data_0, opaque_data_1);
+
+ cid = le32_to_cpu(unalign_data.cid);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid);
+
+ fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid);
+ if (fpdu)
+ memset(fpdu, 0, sizeof(*fpdu));
+}
+
+static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ int rc = 0;
+
+ if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_syn_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate syn connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+ iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
+ if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_ooo_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate ooo connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
+ iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
+ if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) {
+ rc = qed_ll2_terminate_connection(p_hwfn,
+ iwarp_info->ll2_mpa_handle);
+ if (rc)
+ DP_INFO(p_hwfn, "Failed to terminate mpa connection\n");
+
+ qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
+ iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
+ }
+
+ qed_llh_remove_mac_filter(p_hwfn->cdev, 0,
+ p_hwfn->p_rdma_info->iwarp.mac_addr);
+
+ return rc;
+}
+
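+/* Allocate num_rx_bufs DMA-coherent rx buffers of buff_size bytes each and
+ * post them on the given ll2 connection handle. Buffers that were already
+ * handed to ll2 are later freed through the rx/tx release callbacks.
+ */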
+static int
+qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn,
+ int num_rx_bufs, int buff_size, u8 ll2_handle)
+{
+ struct qed_iwarp_ll2_buff *buffer;
+ int rc = 0;
+ int i;
+
+ for (i = 0; i < num_rx_bufs; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ buff_size,
+ &buffer->data_phys_addr,
+ GFP_KERNEL);
+ if (!buffer->data) {
+ kfree(buffer);
+ rc = -ENOMEM;
+ break;
+ }
+
+ buffer->buff_size = buff_size;
+ rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle);
+ if (rc)
+ /* buffers will be deallocated by qed_ll2 */
+ break;
+ }
+ return rc;
+}
+
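+/* Worst-case rx buffer size: MTU plus the Ethernet header, two VLAN tags,
+ * a 2-byte pad and one extra cache line, rounded up to the cache-line size.
+ * For example, assuming ETH_CACHE_LINE_SIZE is 64, a 1500-byte MTU yields
+ * ALIGN(1500 + 14 + 8 + 2 + 64, 64) = 1600 bytes.
+ */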
+#define QED_IWARP_MAX_BUF_SIZE(mtu) \
+ ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \
+ ETH_CACHE_LINE_SIZE)
+
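+/* Bring up the three ll2 connections used by iwarp - the SYN connection for
+ * incoming TCP SYN packets, the OOO connection for out-of-order segments and
+ * the unaligned MPA connection - and allocate the rx buffers and partial-fpdu
+ * bookkeeping they need.
+ */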
+static int
+qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ u32 rcv_wnd_size)
+{
+ struct qed_iwarp_info *iwarp_info;
+ struct qed_ll2_acquire_data data;
+ struct qed_ll2_cbs cbs;
+ u32 buff_size;
+ u16 n_ooo_bufs;
+ int rc = 0;
+ int i;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+ iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL;
+ iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL;
+ iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL;
+
+ iwarp_info->max_mtu = params->max_mtu;
+
+ ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr);
+
+ rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
+ if (rc)
+ return rc;
+
+ /* Start SYN connection */
+ cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt;
+ cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
+ cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
+ cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
+ cbs.slowpath_cb = NULL;
+ cbs.cookie = p_hwfn;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_IWARP;
+ /* SYN will use ctx based queues */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
+ data.input.mtu = params->max_mtu;
+ data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
+ data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
+ data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
+ data.input.tx_tc = PKT_LB_TC;
+ data.input.tx_dest = QED_LL2_TX_DEST_LB;
+ data.p_connection_handle = &iwarp_info->ll2_syn_handle;
+ data.cbs = &cbs;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n");
+ qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr);
+ return rc;
+ }
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n");
+ goto err;
+ }
+
+ buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu);
+ rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+ QED_IWARP_LL2_SYN_RX_SIZE,
+ buff_size,
+ iwarp_info->ll2_syn_handle);
+ if (rc)
+ goto err;
+
+ /* Start OOO connection */
+ data.input.conn_type = QED_LL2_TYPE_OOO;
+ /* OOO/unaligned will use legacy ll2 queues (ram based) */
+ data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
+ data.input.mtu = params->max_mtu;
+
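+ /* Size the OOO rx ring to hold QED_IWARP_MAX_OOO receive windows'
+ * worth of MTU-sized frames, capped at QED_IWARP_LL2_OOO_MAX_RX_SIZE.
+ */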
+ n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
+ iwarp_info->max_mtu;
+ n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE);
+
+ data.input.rx_num_desc = n_ooo_bufs;
+ data.input.rx_num_ooo_buffers = n_ooo_bufs;
+
+ data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */
+ data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE;
+ data.p_connection_handle = &iwarp_info->ll2_ooo_handle;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc)
+ goto err;
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle);
+ if (rc)
+ goto err;
+
+ /* Start Unaligned MPA connection */
+ cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt;
+ cbs.slowpath_cb = qed_iwarp_ll2_slowpath;
+
+ memset(&data, 0, sizeof(data));
+ data.input.conn_type = QED_LL2_TYPE_IWARP;
+ data.input.mtu = params->max_mtu;
+ /* The FW requires that once a packet arrives OOO, at least two rx
+ * buffers be available on the unaligned connection to handle the
+ * case where it is a partial fpdu.
+ */
+ data.input.rx_num_desc = n_ooo_bufs * 2;
+ data.input.tx_num_desc = data.input.rx_num_desc;
+ data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU;
+ data.input.tx_tc = PKT_LB_TC;
+ data.input.tx_dest = QED_LL2_TX_DEST_LB;
+ data.p_connection_handle = &iwarp_info->ll2_mpa_handle;
+ data.input.secondary_queue = true;
+ data.cbs = &cbs;
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc)
+ goto err;
+
+ rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle);
+ if (rc)
+ goto err;
+
+ rc = qed_iwarp_ll2_alloc_buffers(p_hwfn,
+ data.input.rx_num_desc,
+ buff_size,
+ iwarp_info->ll2_mpa_handle);
+ if (rc)
+ goto err;
+
+ iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps,
+ sizeof(*iwarp_info->partial_fpdus),
+ GFP_KERNEL);
+ if (!iwarp_info->partial_fpdus) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps;
+
+ iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL);
+ if (!iwarp_info->mpa_intermediate_buf) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /* The mpa_bufs array serves for pending RX packets received on the
+ * mpa ll2 that don't have room on the tx ring and require later
+ * processing. We can't fail to allocate such a struct, therefore we
+ * allocate enough to take care of all rx packets.
+ */
+ iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc,
+ sizeof(*iwarp_info->mpa_bufs),
+ GFP_KERNEL);
+ if (!iwarp_info->mpa_bufs) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list);
+ INIT_LIST_HEAD(&iwarp_info->mpa_buf_list);
+ for (i = 0; i < data.input.rx_num_desc; i++)
+ list_add_tail(&iwarp_info->mpa_bufs[i].list_entry,
+ &iwarp_info->mpa_buf_list);
+ return rc;
+err:
+ qed_iwarp_ll2_stop(p_hwfn);
+
+ return rc;
+}
+
+static struct {
+ u32 two_ports;
+ u32 four_ports;
+} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = {
+ {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P},
+ {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P}
+};
+
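+/* Initialize the per-hwfn iwarp state: pick the default TCP receive window
+ * for the chip/port-count combination, derive the window scale from it
+ * (e.g. a window four times QED_IWARP_RCV_WND_SIZE_MIN gives a scale of 2
+ * and an advertised window of rcv_wnd_size >> 2), set the MPA negotiation
+ * defaults, register the async event callback and start the ll2 connections.
+ */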
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_iwarp_info *iwarp_info;
+ enum chip_ids chip_id;
+ u32 rcv_wnd_size;
+
+ iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+
+ iwarp_info->tcp_flags = QED_IWARP_TS_EN;
+
+ chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2;
+ rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ?
+ qed_iwarp_rcv_wnd_size[chip_id].four_ports :
+ qed_iwarp_rcv_wnd_size[chip_id].two_ports;
+
+ /* a window of QED_IWARP_RCV_WND_SIZE_MIN corresponds to a scale of 0 */
+ iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
+ ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+ iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
+ iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
+ iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
+
+ iwarp_info->peer2peer = QED_IWARP_PARAM_P2P;
+
+ iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND |
+ MPA_RTR_TYPE_ZERO_WRITE |
+ MPA_RTR_TYPE_ZERO_READ;
+
+ spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock);
+ INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list);
+ INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list);
+
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
+ qed_iwarp_async_event);
+ qed_ooo_setup(p_hwfn);
+
+ return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size);
+}
+
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn)
+{
+ int rc;
+
+ qed_iwarp_free_prealloc_ep(p_hwfn);
+ rc = qed_iwarp_wait_for_all_cids(p_hwfn);
+ if (rc)
+ return rc;
+
+ return qed_iwarp_ll2_stop(p_hwfn);
+}
+
+static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ u8 fw_return_code)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true);
+
+ params.event = QED_IWARP_EVENT_CLOSE;
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+ params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
+ 0 : -ECONNRESET;
+
+ /* paired with READ_ONCE in destroy_qp */
+ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ ep->event_cb(ep->cb_context, &params);
+}
+
+static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep,
+ int fw_ret_code)
+{
+ struct qed_iwarp_cm_event_params params;
+ bool event_cb = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n",
+ ep->cid, fw_ret_code);
+
+ switch (fw_ret_code) {
+ case IWARP_EXCEPTION_DETECTED_LLP_CLOSED:
+ params.status = 0;
+ params.event = QED_IWARP_EVENT_DISCONNECT;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LLP_RESET:
+ params.status = -ECONNRESET;
+ params.event = QED_IWARP_EVENT_DISCONNECT;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_RQ_EMPTY:
+ params.event = QED_IWARP_EVENT_RQ_EMPTY;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_IRQ_FULL:
+ params.event = QED_IWARP_EVENT_IRQ_FULL;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT:
+ params.event = QED_IWARP_EVENT_LLP_TIMEOUT;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR:
+ params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW:
+ params.event = QED_IWARP_EVENT_CQ_OVERFLOW;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC:
+ params.event = QED_IWARP_EVENT_QP_CATASTROPHIC;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR:
+ params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR:
+ params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR;
+ event_cb = true;
+ break;
+ case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED:
+ params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED;
+ event_cb = true;
+ break;
+ default:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Unhandled exception received...fw_ret_code=%d\n",
+ fw_ret_code);
+ break;
+ }
+
+ if (event_cb) {
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+ ep->event_cb(ep->cb_context, &params);
+ }
+}
+
+static void
+qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ struct qed_iwarp_cm_event_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+ /* paired with READ_ONCE in destroy_qp */
+ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
+
+ switch (fw_return_code) {
+ case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "%s(0x%x) TCP connect got invalid packet\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ case IWARP_CONN_ERROR_TCP_CONNECTION_RST:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "%s(0x%x) TCP Connection Reset\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT:
+ DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -EBUSY;
+ break;
+ case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNREFUSED;
+ break;
+ case IWARP_CONN_ERROR_MPA_INVALID_PACKET:
+ DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid);
+ params.status = -ECONNRESET;
+ break;
+ default:
+ DP_ERR(p_hwfn,
+ "%s(0x%x) Unexpected return code tcp connect: %d\n",
+ QED_IWARP_CONNECT_MODE_STRING(ep),
+ ep->tcp_cid, fw_return_code);
+ params.status = -ECONNRESET;
+ break;
+ }
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ ep->tcp_cid = QED_IWARP_INVALID_TCP_CID;
+ qed_iwarp_return_ep(p_hwfn, ep);
+ } else {
+ ep->event_cb(ep->cb_context, &params);
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ }
+}
+
+static void
+qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn,
+ struct qed_iwarp_ep *ep, u8 fw_return_code)
+{
+ u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle;
+
+ if (ep->connect_mode == TCP_CONNECT_PASSIVE) {
+ /* Done with the SYN packet, post back to ll2 rx */
+ qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle);
+
+ ep->syn = NULL;
+
+ /* If the connect failed, the upper layer doesn't know about it yet */
+ if (fw_return_code == RDMA_RETURN_OK)
+ qed_iwarp_mpa_received(p_hwfn, ep);
+ else
+ qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
+ fw_return_code);
+ } else {
+ if (fw_return_code == RDMA_RETURN_OK)
+ qed_iwarp_mpa_offload(p_hwfn, ep);
+ else
+ qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep,
+ fw_return_code);
+ }
+}
+
+static inline bool
+qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
+{
+ if (!ep || (ep->sig != QED_EP_SIG)) {
+ DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep);
+ return false;
+ }
+
+ return true;
+}
+
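+/* EQ callback for PROTOCOLID_IWARP async events. For connection-level events
+ * the fw async_handle carries the ep pointer; for CID_CLEANED and the SRQ
+ * events the low dword carries a cid / srq id instead.
+ */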
+static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+ struct regpair *fw_handle = &data->rdma_data.async_handle;
+ struct qed_iwarp_ep *ep = NULL;
+ u16 srq_offset;
+ u16 srq_id;
+ u16 cid;
+
+ ep = (struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi,
+ fw_handle->lo);
+
+ switch (fw_event_code) {
+ case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE:
+ /* Async completion after TCP 3-way handshake */
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n",
+ ep->tcp_cid, fw_return_code);
+ qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED:
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_exception_received(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE:
+ /* Async completion for Close Connection ramrod */
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED:
+ /* Async event for active side only */
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_mpa_reply_arrived(p_hwfn, ep);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE:
+ if (!qed_iwarp_check_ep_ok(p_hwfn, ep))
+ return -EINVAL;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n",
+ ep->cid, fw_return_code);
+ qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED:
+ cid = (u16)le32_to_cpu(fw_handle->lo);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid);
+ qed_iwarp_cid_cleaned(p_hwfn, cid);
+
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n");
+ srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
+ /* FW assigns a value that fits in a u16 */
+ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
+ events.affiliated_event(events.context,
+ QED_IWARP_EVENT_SRQ_EMPTY,
+ &srq_id);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n");
+ srq_offset = p_hwfn->p_rdma_info->srq_id_offset;
+ /* FW assigns a value that fits in a u16 */
+ srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset;
+ events.affiliated_event(events.context,
+ QED_IWARP_EVENT_SRQ_LIMIT,
+ &srq_id);
+ break;
+ case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW:
+ DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n");
+
+ p_hwfn->p_rdma_info->events.affiliated_event(
+ p_hwfn->p_rdma_info->events.context,
+ QED_IWARP_EVENT_CQ_OVERFLOW,
+ (void *)fw_handle);
+ break;
+ default:
+ DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n",
+ fw_event_code);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+qed_iwarp_create_listen(void *rdma_cxt,
+ struct qed_iwarp_listen_in *iparams,
+ struct qed_iwarp_listen_out *oparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_iwarp_listener *listener;
+
+ listener = kzalloc(sizeof(*listener), GFP_KERNEL);
+ if (!listener)
+ return -ENOMEM;
+
+ listener->ip_version = iparams->ip_version;
+ memcpy(listener->ip_addr, iparams->ip_addr, sizeof(listener->ip_addr));
+ listener->port = iparams->port;
+ listener->vlan = iparams->vlan;
+
+ listener->event_cb = iparams->event_cb;
+ listener->cb_context = iparams->cb_context;
+ listener->max_backlog = iparams->max_backlog;
+ oparams->handle = listener;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_add_tail(&listener->list_entry,
+ &p_hwfn->p_rdma_info->iwarp.listen_list);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n",
+ listener->event_cb,
+ listener,
+ listener->ip_addr[0],
+ listener->ip_addr[1],
+ listener->ip_addr[2],
+ listener->ip_addr[3], listener->port, listener->vlan);
+
+ return 0;
+}
+
+int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle)
+{
+ struct qed_iwarp_listener *listener = handle;
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&listener->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+
+ kfree(listener);
+
+ return 0;
+}
+
+int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams)
+{
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_iwarp_ep *ep;
+ struct qed_rdma_qp *qp;
+ int rc;
+
+ ep = iparams->ep_context;
+ if (!ep) {
+ DP_ERR(p_hwfn, "EP context received in send_rtr is NULL\n");
+ return -EINVAL;
+ }
+
+ qp = ep->qp;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n",
+ qp->icid, ep->tcp_cid);
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
+ PROTOCOLID_IWARP, &init_data);
+
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc);
+
+ return rc;
+}
+
+void
+qed_iwarp_query_qp(struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ out_params->state = qed_iwarp2roce_state(qp->iwarp_state);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
new file mode 100644
index 000000000..c3872cd94
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_IWARP_H
+#define _QED_IWARP_H
+
+enum qed_iwarp_qp_state {
+ QED_IWARP_QP_STATE_IDLE,
+ QED_IWARP_QP_STATE_RTS,
+ QED_IWARP_QP_STATE_TERMINATE,
+ QED_IWARP_QP_STATE_CLOSING,
+ QED_IWARP_QP_STATE_ERROR,
+};
+
+enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state);
+
+#define QED_IWARP_PREALLOC_CNT (256)
+
+#define QED_IWARP_LL2_SYN_TX_SIZE (128)
+#define QED_IWARP_LL2_SYN_RX_SIZE (256)
+
+#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256)
+#define QED_IWARP_MAX_OOO (16)
+#define QED_IWARP_LL2_OOO_MAX_RX_SIZE (16384)
+
+#define QED_IWARP_HANDLE_INVAL (0xff)
+
+struct qed_iwarp_ll2_buff {
+ struct qed_iwarp_ll2_buff *piggy_buf;
+ void *data;
+ dma_addr_t data_phys_addr;
+ u32 buff_size;
+};
+
+struct qed_iwarp_ll2_mpa_buf {
+ struct list_head list_entry;
+ struct qed_iwarp_ll2_buff *ll2_buf;
+ struct unaligned_opaque_data data;
+ u16 tcp_payload_len;
+ u8 placement_offset;
+};
+
+/* In some cases an fpdu will arrive with only one byte of the header; in this
+ * case the fpdu_length will be partial (contain only the higher byte) and
+ * incomplete_bytes will contain the invalid value.
+ */
+#define QED_IWARP_INVALID_INCOMPLETE_BYTES 0xffff
+
+struct qed_iwarp_fpdu {
+ struct qed_iwarp_ll2_buff *mpa_buf;
+ void *mpa_frag_virt;
+ dma_addr_t mpa_frag;
+ dma_addr_t pkt_hdr;
+ u16 mpa_frag_len;
+ u16 fpdu_length;
+ u16 incomplete_bytes;
+ u8 pkt_hdr_size;
+};
+
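+/* Per-hwfn iwarp state: listener/ep lists, the ll2 connection handles and the
+ * buffers used for unaligned MPA processing.
+ */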
+struct qed_iwarp_info {
+ struct list_head listen_list; /* qed_iwarp_listener */
+ struct list_head ep_list; /* qed_iwarp_ep */
+ struct list_head ep_free_list; /* pre-allocated ep's */
+ struct list_head mpa_buf_list; /* list of mpa_bufs */
+ struct list_head mpa_buf_pending_list;
+ spinlock_t iw_lock; /* for iwarp resources */
+ spinlock_t qp_lock; /* for teardown races */
+ u32 rcv_wnd_scale;
+ u16 rcv_wnd_size;
+ u16 max_mtu;
+ u8 mac_addr[ETH_ALEN];
+ u8 crc_needed;
+ u8 tcp_flags;
+ u8 ll2_syn_handle;
+ u8 ll2_ooo_handle;
+ u8 ll2_mpa_handle;
+ u8 peer2peer;
+ enum mpa_negotiation_mode mpa_rev;
+ enum mpa_rtr_type rtr_type;
+ struct qed_iwarp_fpdu *partial_fpdus;
+ struct qed_iwarp_ll2_mpa_buf *mpa_bufs;
+ u8 *mpa_intermediate_buf;
+ u16 max_num_partial_fpdus;
+};
+
+enum qed_iwarp_ep_state {
+ QED_IWARP_EP_INIT,
+ QED_IWARP_EP_MPA_REQ_RCVD,
+ QED_IWARP_EP_MPA_OFFLOADED,
+ QED_IWARP_EP_ESTABLISHED,
+ QED_IWARP_EP_CLOSED
+};
+
+union async_output {
+ struct iwarp_eqe_data_mpa_async_completion mpa_response;
+ struct iwarp_eqe_data_tcp_async_completion mpa_request;
+};
+
+#define QED_MAX_PRIV_DATA_LEN (512)
+struct qed_iwarp_ep_memory {
+ u8 in_pdata[QED_MAX_PRIV_DATA_LEN];
+ u8 out_pdata[QED_MAX_PRIV_DATA_LEN];
+ union async_output async_output;
+};
+
+/* The endpoint structure represents a TCP connection. This connection may or
+ * may not be associated with a QP (in which case qp == NULL).
+ */
+struct qed_iwarp_ep {
+ struct list_head list_entry;
+ struct qed_rdma_qp *qp;
+ struct qed_iwarp_ep_memory *ep_buffer_virt;
+ dma_addr_t ep_buffer_phys;
+ enum qed_iwarp_ep_state state;
+ int sig;
+ struct qed_iwarp_cm_info cm_info;
+ enum tcp_connect_mode connect_mode;
+ enum mpa_rtr_type rtr_type;
+ enum mpa_negotiation_mode mpa_rev;
+ u32 tcp_cid;
+ u32 cid;
+ u16 mss;
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+ bool mpa_reply_processed;
+
+ /* For Passive side - syn packet related data */
+ u16 syn_ip_payload_length;
+ struct qed_iwarp_ll2_buff *syn;
+ dma_addr_t syn_phy_addr;
+
+ /* The event_cb function is called for asynchronous events associated
+ * with the ep. It is initialized at different entry points depending
+ * on whether the ep is the active or the passive side of the tcp
+ * connection. The cb_context is passed to the event_cb function.
+ */
+ iwarp_event_handler event_cb;
+ void *cb_context;
+};
+
+struct qed_iwarp_listener {
+ struct list_head list_entry;
+
+ /* The event_cb function is called for connection requests.
+ * The cb_context is passed to the event_cb function.
+ */
+ iwarp_event_handler event_cb;
+ void *cb_context;
+ u32 max_backlog;
+ u32 ip_addr[4];
+ u16 port;
+ u16 vlan;
+ u8 ip_version;
+};
+
+int qed_iwarp_alloc(struct qed_hwfn *p_hwfn);
+
+int qed_iwarp_setup(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params);
+
+void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+ struct iwarp_init_func_ramrod_data *p_ramrod);
+
+int qed_iwarp_stop(struct qed_hwfn *p_hwfn);
+
+void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn);
+
+void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn);
+
+void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_create_qp_out_params *out_params);
+
+int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp,
+ enum qed_iwarp_qp_state new_state, bool internal);
+
+int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
+
+int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
+
+void qed_iwarp_query_qp(struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+int
+qed_iwarp_connect(void *rdma_cxt,
+ struct qed_iwarp_connect_in *iparams,
+ struct qed_iwarp_connect_out *oparams);
+
+int
+qed_iwarp_create_listen(void *rdma_cxt,
+ struct qed_iwarp_listen_in *iparams,
+ struct qed_iwarp_listen_out *oparams);
+
+int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams);
+
+int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams);
+int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle);
+
+int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
new file mode 100644
index 000000000..970b9aabb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -0,0 +1,2920 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_dev_api.h"
+#include <linux/qed/qed_eth_if.h>
+#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_l2.h"
+#include "qed_mcp.h"
+#include "qed_ptp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+
+#define QED_MAX_SGES_NUM 16
+#define CRC32_POLY 0x1edc6f41
+
+struct qed_l2_info {
+ u32 queues;
+ unsigned long **pp_qid_usage;
+
+ /* The lock is meant to synchronize access to the qid usage */
+ struct mutex lock;
+};
+
+int qed_l2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_l2_info *p_l2_info;
+ unsigned long **pp_qids;
+ u32 i;
+
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
+ return 0;
+
+ p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL);
+ if (!p_l2_info)
+ return -ENOMEM;
+ p_hwfn->p_l2_info = p_l2_info;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE);
+ } else {
+ u8 rx = 0, tx = 0;
+
+ qed_vf_get_num_rxqs(p_hwfn, &rx);
+ qed_vf_get_num_txqs(p_hwfn, &tx);
+
+ p_l2_info->queues = max_t(u8, rx, tx);
+ }
+
+ pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *),
+ GFP_KERNEL);
+ if (!pp_qids)
+ return -ENOMEM;
+ p_l2_info->pp_qid_usage = pp_qids;
+
+ for (i = 0; i < p_l2_info->queues; i++) {
+ pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL);
+ if (!pp_qids[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qed_l2_setup(struct qed_hwfn *p_hwfn)
+{
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ mutex_init(&p_hwfn->p_l2_info->lock);
+}
+
+void qed_l2_free(struct qed_hwfn *p_hwfn)
+{
+ u32 i;
+
+ if (!QED_IS_L2_PERSONALITY(p_hwfn))
+ return;
+
+ if (!p_hwfn->p_l2_info)
+ return;
+
+ if (!p_hwfn->p_l2_info->pp_qid_usage)
+ goto out_l2_info;
+
+ /* Free until we hit the first uninitialized entry */
+ for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
+ if (!p_hwfn->p_l2_info->pp_qid_usage[i])
+ break;
+ kfree(p_hwfn->p_l2_info->pp_qid_usage[i]);
+ }
+
+ kfree(p_hwfn->p_l2_info->pp_qid_usage);
+
+out_l2_info:
+ kfree(p_hwfn->p_l2_info);
+ p_hwfn->p_l2_info = NULL;
+}
+
+static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info;
+ u16 queue_id = p_cid->rel.queue_id;
+ bool b_rc = true;
+ u8 first;
+
+ mutex_lock(&p_l2_info->lock);
+
+ if (queue_id >= p_l2_info->queues) {
+ DP_NOTICE(p_hwfn,
+ "Requested to increase usage for qzone %04x out of %08x\n",
+ queue_id, p_l2_info->queues);
+ b_rc = false;
+ goto out;
+ }
+
+ first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id],
+ MAX_QUEUES_PER_QZONE);
+ if (first >= MAX_QUEUES_PER_QZONE) {
+ b_rc = false;
+ goto out;
+ }
+
+ __set_bit(first, p_l2_info->pp_qid_usage[queue_id]);
+ p_cid->qid_usage_idx = first;
+
+out:
+ mutex_unlock(&p_l2_info->lock);
+ return b_rc;
+}
+
+static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ mutex_lock(&p_hwfn->p_l2_info->lock);
+
+ clear_bit(p_cid->qid_usage_idx,
+ p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);
+
+ mutex_unlock(&p_hwfn->p_l2_info->lock);
+}
+
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID);
+
+ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+ _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
+
+ /* For PF's VFs we maintain the index inside queue-zone in IOV */
+ if (p_cid->vfid == QED_QUEUE_CID_SELF)
+ qed_eth_queue_qid_usage_del(p_hwfn, p_cid);
+
+ vfree(p_cid);
+}
+
+/* This internal version is only meant to be called directly by PFs
+ * initializing CIDs for their VFs.
+ */
+static struct qed_queue_cid *
+_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params)
+{
+ struct qed_queue_cid *p_cid;
+ int rc;
+
+ p_cid = vzalloc(sizeof(*p_cid));
+ if (!p_cid)
+ return NULL;
+
+ p_cid->opaque_fid = opaque_fid;
+ p_cid->cid = cid;
+ p_cid->p_owner = p_hwfn;
+
+ /* Fill in parameters */
+ p_cid->rel.vport_id = p_params->vport_id;
+ p_cid->rel.queue_id = p_params->queue_id;
+ p_cid->rel.stats_id = p_params->stats_id;
+ p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
+ p_cid->b_is_rx = b_is_rx;
+ p_cid->sb_idx = p_params->sb_idx;
+
+ /* Fill-in bits related to VFs' queues if information was provided */
+ if (p_vf_params) {
+ p_cid->vfid = p_vf_params->vfid;
+ p_cid->vf_qid = p_vf_params->vf_qid;
+ p_cid->vf_legacy = p_vf_params->vf_legacy;
+ } else {
+ p_cid->vfid = QED_QUEUE_CID_SELF;
+ }
+
+ /* Don't try calculating the absolute indices for VFs */
+ if (IS_VF(p_hwfn->cdev)) {
+ p_cid->abs = p_cid->rel;
+ goto out;
+ }
+
+ /* Calculate the engine-absolute indices of the resources.
+ * This would guarantee they're valid later on.
+ * In some cases [SBs] we already have the right values.
+ */
+ rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
+ if (rc)
+ goto fail;
+
+ rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id);
+ if (rc)
+ goto fail;
+
+ /* In case of a PF configuring its VF's queues, the stats-id is already
+ * absolute [since there's a single index that's suitable per-VF].
+ */
+ if (p_cid->vfid == QED_QUEUE_CID_SELF) {
+ rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
+ &p_cid->abs.stats_id);
+ if (rc)
+ goto fail;
+ } else {
+ p_cid->abs.stats_id = p_cid->rel.stats_id;
+ }
+
+out:
+ /* VF-images have provided the qid_usage_idx on their own.
+ * Otherwise, we need to allocate a unique one.
+ */
+ if (!p_vf_params) {
+ if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid))
+ goto fail;
+ } else {
+ p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
+ p_cid->opaque_fid,
+ p_cid->cid,
+ p_cid->rel.vport_id,
+ p_cid->abs.vport_id,
+ p_cid->rel.queue_id,
+ p_cid->qid_usage_idx,
+ p_cid->abs.queue_id,
+ p_cid->rel.stats_id,
+ p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx);
+
+ return p_cid;
+
+fail:
+ vfree(p_cid);
+ return NULL;
+}
+
+struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params)
+{
+ struct qed_queue_cid *p_cid;
+ u8 vfid = QED_CXT_PF_CID;
+ bool b_legacy_vf = false;
+ u32 cid = 0;
+
+ /* In case of legacy VFs, the CID can be derived from the additional
+ * VF parameters - the VF assumes queue X uses CID X, so we can simply
+ * use the vf_qid for this purpose as well.
+ */
+ if (p_vf_params) {
+ vfid = p_vf_params->vfid;
+
+ if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) {
+ b_legacy_vf = true;
+ cid = p_vf_params->vf_qid;
+ }
+ }
+
+ /* Get a unique firmware CID for this queue, in case it's a PF.
+ * VFs don't need a CID as the queue configuration will be done
+ * by the PF.
+ */
+ if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
+ if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
+ &cid, vfid)) {
+ DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
+ return NULL;
+ }
+ }
+
+ p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
+ p_params, b_is_rx, p_vf_params);
+ if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
+ _qed_cxt_release_cid(p_hwfn, cid, vfid);
+
+ return p_cid;
+}
+
+static struct qed_queue_cid *
+qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ bool b_is_rx,
+ struct qed_queue_start_common_params *p_params)
+{
+ return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
+ NULL);
+}
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
+{
+ struct vport_start_ramrod_data *p_ramrod = NULL;
+ struct eth_vport_tpa_param *tpa_param;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 min_size, rx_mode = 0;
+ u8 abs_vport_id = 0;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_start;
+ p_ramrod->vport_id = abs_vport_id;
+
+ p_ramrod->mtu = cpu_to_le16(p_params->mtu);
+ p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
+ p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
+ p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
+
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
+ SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
+
+ p_ramrod->rx_mode.state = cpu_to_le16(rx_mode);
+
+ /* TPA related fields */
+ tpa_param = &p_ramrod->tpa_param;
+ memset(tpa_param, 0, sizeof(*tpa_param));
+
+ tpa_param->max_buff_num = p_params->max_buffers_per_cqe;
+
+ switch (p_params->tpa_mode) {
+ case QED_TPA_MODE_GRO:
+ min_size = p_params->mtu / 2;
+
+ tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
+ tpa_param->tpa_max_size = cpu_to_le16(U16_MAX);
+ tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size);
+ tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size);
+ tpa_param->tpa_ipv4_en_flg = 1;
+ tpa_param->tpa_ipv6_en_flg = 1;
+ tpa_param->tpa_pkt_split_flg = 1;
+ tpa_param->tpa_gro_consistent_flg = 1;
+ break;
+ default:
+ break;
+ }
+
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+
+ p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
+ p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;
+
+ /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
+ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
+ p_params->concrete_fid);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+ }
+
+ return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
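+/* Translate the driver RSS parameters into the vport-update ramrod's
+ * eth_vport_rss_config section: capability bits, the indirection table
+ * (absolute rx queue ids) and the ten 32-bit words of the RSS key.
+ */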
+static int
+qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_rss_params *p_rss)
+{
+ struct eth_vport_rss_config *p_config;
+ u16 capabilities = 0;
+ int i, table_size;
+ int rc = 0;
+
+ if (!p_rss) {
+ p_ramrod->common.update_rss_flg = 0;
+ return rc;
+ }
+ p_config = &p_ramrod->rss_config;
+
+ BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM);
+
+ rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
+ if (rc)
+ return rc;
+
+ p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
+ p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
+ p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
+ p_config->update_rss_key = p_rss->update_rss_key;
+
+ p_config->rss_mode = p_rss->rss_enable ?
+ ETH_VPORT_RSS_MODE_REGULAR :
+ ETH_VPORT_RSS_MODE_DISABLED;
+
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
+ !!(p_rss->rss_caps & QED_RSS_IPV4));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
+ !!(p_rss->rss_caps & QED_RSS_IPV6));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & QED_RSS_IPV4_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
+ !!(p_rss->rss_caps & QED_RSS_IPV6_TCP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & QED_RSS_IPV4_UDP));
+ SET_FIELD(capabilities,
+ ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
+ !!(p_rss->rss_caps & QED_RSS_IPV6_UDP));
+ p_config->tbl_size = p_rss->rss_table_size_log;
+
+ p_config->capabilities = cpu_to_le16(capabilities);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
+ p_ramrod->common.update_rss_flg,
+ p_config->rss_mode,
+ p_config->update_rss_capabilities,
+ p_config->capabilities,
+ p_config->update_rss_ind_table, p_config->update_rss_key);
+
+ table_size = min_t(int, QED_RSS_IND_TABLE_SIZE,
+ 1 << p_config->tbl_size);
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i];
+
+ if (!p_queue)
+ return -EINVAL;
+
+ p_config->indirection_table[i] =
+ cpu_to_le16(p_queue->abs.queue_id);
+ }
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "Configured RSS indirection table [%d entries]:\n",
+ table_size);
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) {
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFUP,
+ "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
+ le16_to_cpu(p_config->indirection_table[i]),
+ le16_to_cpu(p_config->indirection_table[i + 1]),
+ le16_to_cpu(p_config->indirection_table[i + 2]),
+ le16_to_cpu(p_config->indirection_table[i + 3]),
+ le16_to_cpu(p_config->indirection_table[i + 4]),
+ le16_to_cpu(p_config->indirection_table[i + 5]),
+ le16_to_cpu(p_config->indirection_table[i + 6]),
+ le16_to_cpu(p_config->indirection_table[i + 7]),
+ le16_to_cpu(p_config->indirection_table[i + 8]),
+ le16_to_cpu(p_config->indirection_table[i + 9]),
+ le16_to_cpu(p_config->indirection_table[i + 10]),
+ le16_to_cpu(p_config->indirection_table[i + 11]),
+ le16_to_cpu(p_config->indirection_table[i + 12]),
+ le16_to_cpu(p_config->indirection_table[i + 13]),
+ le16_to_cpu(p_config->indirection_table[i + 14]),
+ le16_to_cpu(p_config->indirection_table[i + 15]));
+ }
+
+ for (i = 0; i < 10; i++)
+ p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]);
+
+ return rc;
+}
+
+static void
+qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_filter_accept_flags accept_flags)
+{
+ p_ramrod->common.update_rx_mode_flg =
+ accept_flags.update_rx_mode_config;
+
+ p_ramrod->common.update_tx_mode_flg =
+ accept_flags.update_tx_mode_config;
+
+ /* Set Rx mode accept flags */
+ if (p_ramrod->common.update_rx_mode_flg) {
+ u8 accept_filter = accept_flags.rx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
+ !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) ||
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
+ !!(accept_filter & QED_ACCEPT_ANY_VNI));
+
+ p_ramrod->rx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->rx_mode.state = 0x%x\n", state);
+ }
+
+ /* Set Tx mode accept flags */
+ if (p_ramrod->common.update_tx_mode_flg) {
+ u8 accept_filter = accept_flags.tx_accept_filter;
+ u16 state = 0;
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
+ !!(accept_filter & QED_ACCEPT_NONE));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
+ SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
+ !!(accept_filter & QED_ACCEPT_BCAST));
+
+ p_ramrod->tx_mode.state = cpu_to_le16(state);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "p_ramrod->tx_mode.state = 0x%x\n", state);
+ }
+}
+
+static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ const struct qed_sge_tpa_params *param)
+{
+ struct eth_vport_tpa_param *tpa;
+
+ if (!param) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg;
+ tpa = &p_ramrod->tpa_param;
+ tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg;
+ tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg;
+ tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg;
+ tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg;
+ tpa->max_buff_num = param->max_buffers_per_cqe;
+ tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg;
+ tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg;
+ tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg;
+ tpa->tpa_max_aggs_num = param->tpa_max_aggs_num;
+ tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size);
+ tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start);
+ tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont);
+}
+
+static void
+qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sp_vport_update_params *p_params)
+{
+ int i;
+
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+
+ if (!p_params->update_approx_mcast_flg)
+ return;
+
+ p_ramrod->common.update_approx_mcast_flg = 1;
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ u32 *p_bins = p_params->bins;
+
+ p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
+ }
+}
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_rss_params *p_rss_params = p_params->rss_params;
+ struct vport_update_ramrod_data_cmn *p_cmn;
+ struct qed_sp_init_data init_data;
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ u8 abs_vport_id = 0, val;
+ int rc = -EINVAL;
+
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_params->opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ /* Copy input params to ramrod according to FW struct */
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_cmn = &p_ramrod->common;
+
+ p_cmn->vport_id = abs_vport_id;
+ p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
+ p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
+ p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
+ p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;
+ p_cmn->accept_any_vlan = p_params->accept_any_vlan;
+ val = p_params->update_accept_any_vlan_flg;
+ p_cmn->update_accept_any_vlan_flg = val;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
+ rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
+ if (rc) {
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
+ }
+
+ if (p_params->update_ctl_frame_check) {
+ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+ }
+
+ /* Update mcast bins for VFs; the PF doesn't use this functionality */
+ qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
+
+ qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
+{
+ struct vport_stop_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u8 abs_vport_id = 0;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_vport_stop(p_hwfn);
+
+ rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+ if (rc)
+ return rc;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vport_stop;
+ p_ramrod->vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+ struct qed_filter_accept_flags *p_accept_flags)
+{
+ struct qed_sp_vport_update_params s_params;
+
+ memset(&s_params, 0, sizeof(s_params));
+ memcpy(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct qed_filter_accept_flags));
+
+ return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
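+/* Configure the rx/tx accept (rx-mode) flags on every hwfn of the device,
+ * either through a vport-update ramrod on a PF or through the VF-PF channel
+ * on a VF.
+ */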
+static int qed_filter_accept_cmd(struct qed_dev *cdev,
+ u8 vport,
+ struct qed_filter_accept_flags accept_flags,
+ u8 update_accept_any_vlan,
+ u8 accept_any_vlan,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_sp_vport_update_params vport_update_params;
+ int i, rc;
+
+ /* Prepare and send the vport rx_mode change */
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.vport_id = vport;
+ vport_update_params.accept_flags = accept_flags;
+ vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
+ vport_update_params.accept_any_vlan = accept_any_vlan;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc)
+ return rc;
+ continue;
+ }
+
+ rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
+ comp_mode, p_comp_data);
+ if (rc) {
+ DP_ERR(cdev, "Update rx_mode failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
+ accept_flags.rx_accept_filter,
+ accept_flags.tx_accept_filter);
+ if (update_accept_any_vlan)
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "accept_any_vlan=%d configured\n",
+ accept_any_vlan);
+ }
+
+ return 0;
+}
+
+int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
+{
+ struct rx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
+ p_cid->opaque_fid, p_cid->cid,
+ p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
+ p_ramrod->complete_cqe_flg = 0;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);
+
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
+
+ if (p_cid->vfid != QED_QUEUE_CID_SELF) {
+ bool b_legacy_vf = !!(p_cid->vf_legacy &
+ QED_QCID_LEGACY_VF_RX_PROD);
+
+ p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Queue%s is meant for VF rxq[%02x]\n",
+ b_legacy_vf ? " [legacy]" : "", p_cid->vf_qid);
+ p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+
+ return qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size);
+}
+
+static int
+qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct qed_rxq_start_ret_params *p_ret_params)
+{
+ struct qed_queue_cid *p_cid;
+ int rc;
+
+ /* Allocate a CID for the queue */
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
+ if (!p_cid)
+ return -ENOMEM;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size,
+ &p_ret_params->p_prod);
+ } else {
+ rc = qed_vf_pf_rxq_start(p_hwfn, p_cid,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr,
+ cqe_pbl_size, &p_ret_params->p_prod);
+ }
+
+ /* Provide the caller with a reference to use as a handle */
+ if (rc)
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
+
+ return rc;
+}
+
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ void **pp_rxq_handles,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_queue_cid *p_cid;
+ int rc = -EINVAL;
+ u8 i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i];
+
+ /* Get SPQ entry */
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+
+ p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+static int
+qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ bool b_eq_completion_only, bool b_cqe_completion)
+{
+ struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_stop;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+ p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id);
+
+ /* Cleaning the queue requires the completion to arrive there.
+ * In addition, VFs require the answer to arrive as an eqe to the PF.
+ */
+ p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
+ !b_eq_completion_only) ||
+ b_cqe_completion;
+ p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
+ b_eq_completion_only;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only, bool cqe_completion)
+{
+ struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq;
+ int rc = -EINVAL;
+
+ if (IS_PF(p_hwfn->cdev))
+ rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid,
+ eq_completion_only,
+ cqe_completion);
+ else
+ rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);
+
+ if (!rc)
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
+}
+
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id)
+{
+ struct tx_queue_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.tx_queue_start;
+ p_ramrod->vport_id = p_cid->abs.vport_id;
+
+ p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id);
+ p_ramrod->sb_index = p_cid->sb_idx;
+ p_ramrod->stats_counter_id = p_cid->abs.stats_id;
+
+ p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id);
+ p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id);
+
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
+
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ int rc;
+
+ rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ qed_get_cm_pq_idx_mcos(p_hwfn, tc));
+ if (rc)
+ return rc;
+
+ /* Provide the caller with the necessary return values */
+ *pp_doorbell = p_hwfn->doorbells +
+ qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY);
+
+ return 0;
+}
+
+static int
+qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ u8 tc,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct qed_txq_start_ret_params *p_ret_params)
+{
+ struct qed_queue_cid *p_cid;
+ int rc;
+
+ p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
+ if (!p_cid)
+ return -EINVAL;
+
+ if (IS_PF(p_hwfn->cdev))
+ rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+ else
+ rc = qed_vf_pf_txq_start(p_hwfn, p_cid,
+ pbl_addr, pbl_size,
+ &p_ret_params->p_doorbell);
+
+ if (rc)
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ else
+ p_ret_params->p_handle = (void *)p_cid;
+
+ return rc;
+}
+
+static int
+qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_cid->cid;
+ init_data.opaque_fid = p_cid->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle)
+{
+ struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle;
+ int rc;
+
+ if (IS_PF(p_hwfn->cdev))
+ rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid);
+ else
+ rc = qed_vf_pf_txq_stop(p_hwfn, p_cid);
+
+ if (!rc)
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ return rc;
+}
+
+static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode)
+{
+ enum eth_filter_action action = MAX_ETH_FILTER_ACTION;
+
+ switch (opcode) {
+ case QED_FILTER_ADD:
+ action = ETH_FILTER_ACTION_ADD;
+ break;
+ case QED_FILTER_REMOVE:
+ action = ETH_FILTER_ACTION_REMOVE;
+ break;
+ case QED_FILTER_FLUSH:
+ action = ETH_FILTER_ACTION_REMOVE_ALL;
+ break;
+ default:
+ action = MAX_ETH_FILTER_ACTION;
+ }
+
+ return action;
+}
+
+static int
+qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ struct vport_filter_update_ramrod_data **pp_ramrod,
+ struct qed_spq_entry **pp_ent,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ u8 vport_to_add_to = 0, vport_to_remove_from = 0;
+ struct vport_filter_update_ramrod_data *p_ramrod;
+ struct eth_filter_cmd *p_first_filter;
+ struct eth_filter_cmd *p_second_filter;
+ struct qed_sp_init_data init_data;
+ enum eth_filter_action action;
+ int rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &vport_to_remove_from);
+ if (rc)
+ return rc;
+
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &vport_to_add_to);
+ if (rc)
+ return rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, pp_ent,
+ ETH_RAMROD_FILTERS_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
+ p_ramrod = *pp_ramrod;
+ p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
+ p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
+
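+ /* MOVE and REPLACE are composed of two filter commands (a remove
+ * followed by an add); all other opcodes need a single command.
+ */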
+ switch (p_filter_cmd->opcode) {
+ case QED_FILTER_REPLACE:
+ case QED_FILTER_MOVE:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break;
+ default:
+ p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break;
+ }
+
+ p_first_filter = &p_ramrod->filter_cmds[0];
+ p_second_filter = &p_ramrod->filter_cmds[1];
+
+ switch (p_filter_cmd->type) {
+ case QED_FILTER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC; break;
+ case QED_FILTER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_VLAN; break;
+ case QED_FILTER_MAC_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_PAIR; break;
+ case QED_FILTER_INNER_MAC:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break;
+ case QED_FILTER_INNER_VLAN:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break;
+ case QED_FILTER_INNER_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break;
+ case QED_FILTER_INNER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
+ break;
+ case QED_FILTER_MAC_VNI_PAIR:
+ p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break;
+ case QED_FILTER_VNI:
+ p_first_filter->type = ETH_FILTER_TYPE_VNI; break;
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) {
+ qed_set_fw_mac_addr(&p_first_filter->mac_msb,
+ &p_first_filter->mac_mid,
+ &p_first_filter->mac_lsb,
+ (u8 *)p_filter_cmd->mac);
+ }
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
+ p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan);
+
+ if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
+ (p_first_filter->type == ETH_FILTER_TYPE_VNI))
+ p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni);
+
+ if (p_filter_cmd->opcode == QED_FILTER_MOVE) {
+ p_second_filter->type = p_first_filter->type;
+ p_second_filter->mac_msb = p_first_filter->mac_msb;
+ p_second_filter->mac_mid = p_first_filter->mac_mid;
+ p_second_filter->mac_lsb = p_first_filter->mac_lsb;
+ p_second_filter->vlan_id = p_first_filter->vlan_id;
+ p_second_filter->vni = p_first_filter->vni;
+
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
+
+ p_first_filter->vport_id = vport_to_remove_from;
+
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ p_second_filter->vport_id = vport_to_add_to;
+ } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) {
+ p_first_filter->vport_id = vport_to_add_to;
+ memcpy(p_second_filter, p_first_filter,
+ sizeof(*p_second_filter));
+ p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
+ p_second_filter->action = ETH_FILTER_ACTION_ADD;
+ } else {
+ action = qed_filter_action(p_filter_cmd->opcode);
+
+ if (action == MAX_ETH_FILTER_ACTION) {
+ DP_NOTICE(p_hwfn,
+ "%d is not supported yet\n",
+ p_filter_cmd->opcode);
+ qed_sp_destroy_request(p_hwfn, *pp_ent);
+ return -EINVAL;
+ }
+
+ p_first_filter->action = action;
+ p_first_filter->vport_id = (p_filter_cmd->opcode ==
+ QED_FILTER_REMOVE) ?
+ vport_to_remove_from :
+ vport_to_add_to;
+ }
+
+ return 0;
+}
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct vport_filter_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct eth_filter_cmd_header *p_header;
+ int rc;
+
+ rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
+ &p_ramrod, &p_ent,
+ comp_mode, p_comp_data);
+ if (rc) {
+ DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
+ return rc;
+ }
+ p_header = &p_ramrod->filter_cmd_hdr;
+ p_header->assert_on_error = p_filter_cmd->assert_on_error;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
+ (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" :
+ ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ?
+ "REMOVE" :
+ ((p_filter_cmd->opcode == QED_FILTER_MOVE) ?
+ "MOVE" : "REPLACE")),
+ (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" :
+ ((p_filter_cmd->type == QED_FILTER_VLAN) ?
+ "VLAN" : "MAC & VLAN"),
+ p_ramrod->filter_cmd_hdr.cmd_cnt,
+ p_filter_cmd->is_rx_filter,
+ p_filter_cmd->is_tx_filter);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
+ p_filter_cmd->vport_to_add_to,
+ p_filter_cmd->vport_to_remove_from,
+ p_filter_cmd->mac[0],
+ p_filter_cmd->mac[1],
+ p_filter_cmd->mac[2],
+ p_filter_cmd->mac[3],
+ p_filter_cmd->mac[4],
+ p_filter_cmd->mac[5],
+ p_filter_cmd->vlan);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Description:
+ * Calculates CRC32 on a buffer
+ * Note: crc32_length MUST be a multiple of 8
+ * Return: the CRC32 result (the seed is returned unchanged on invalid input)
+ ******************************************************************************/
+static u32 qed_calc_crc32c(u8 *crc32_packet,
+ u32 crc32_length, u32 crc32_seed, u8 complement)
+{
+ u32 byte = 0, bit = 0, crc32_result = crc32_seed;
+ u8 msb = 0, current_byte = 0;
+
+ if ((!crc32_packet) ||
+ (crc32_length == 0) ||
+ ((crc32_length % 8) != 0))
+ return crc32_result;
+ for (byte = 0; byte < crc32_length; byte++) {
+ current_byte = crc32_packet[byte];
+ for (bit = 0; bit < 8; bit++) {
+ msb = (u8)(crc32_result >> 31);
+ crc32_result = crc32_result << 1;
+ if (msb != (0x1 & (current_byte >> bit))) {
+ crc32_result = crc32_result ^ CRC32_POLY;
+ crc32_result |= 1; /*crc32_result[0] = 1;*/
+ }
+ }
+ }
+ return crc32_result;
+}
+
+static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len)
+{
+ u32 packet_buf[2] = { 0 };
+
+ memcpy((u8 *)(&packet_buf[0]), &mac[0], 6);
+ return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
+}
+
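+/* Compute the approximate-multicast bin for a MAC address: the bin index is
+ * the low byte of a CRC32c over the MAC, selecting one of the 256 bins in
+ * the vport's approximate multicast vector.
+ */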
+u8 qed_mcast_bin_from_mac(u8 *mac)
+{
+ u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
+ mac, ETH_ALEN);
+
+ return crc & 0xff;
+}
+
+static int
+qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct vport_update_ramrod_data *p_ramrod = NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u8 abs_vport_id = 0;
+ int rc, i;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD)
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
+ &abs_vport_id);
+ else
+ rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
+ &abs_vport_id);
+ if (rc)
+ return rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_VPORT_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc) {
+ DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
+ return rc;
+ }
+
+ p_ramrod = &p_ent->ramrod.vport_update;
+ p_ramrod->common.update_approx_mcast_flg = 1;
+
+ /* explicitly clear out the entire vector */
+ memset(&p_ramrod->approx_mcast.bins, 0,
+ sizeof(p_ramrod->approx_mcast.bins));
+ memset(bins, 0, sizeof(bins));
+ /* The ADD opcode is an explicit set operation; it removes
+ * any existing multicast filters configured on the vport.
+ */
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit, nbits;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ nbits = sizeof(u32) * BITS_PER_BYTE;
+ bins[bit / nbits] |= 1 << (bit % nbits);
+ }
+
+ /* Convert to the correct endianness */
+ for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
+ struct vport_update_ramrod_mcast *p_ramrod_bins;
+
+ p_ramrod_bins = &p_ramrod->approx_mcast;
+ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
+ }
+ }
+
+ p_ramrod->common.vport_id = abs_vport_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ /* only ADD and REMOVE operations are supported for multi-cast */
+ if ((p_filter_cmd->opcode != QED_FILTER_ADD &&
+ (p_filter_cmd->opcode != QED_FILTER_REMOVE)) ||
+ (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS))
+ return -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ u16 opaque_fid;
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_mcast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ }
+ return rc;
+}
+
+static int qed_filter_ucast_cmd(struct qed_dev *cdev,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ int rc = 0;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u16 opaque_fid;
+
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ opaque_fid,
+ p_filter_cmd,
+ comp_mode, p_comp_data);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+/* Statistics related code */
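+/* For a PF the per-queue storm statistics are read directly from the storm
+ * RAM behind BAR0; a VF instead uses the addresses the PF supplied in the
+ * acquire response.
+ */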
+static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len, u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
+}
+
+static noinline_for_stack void
+__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
+{
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+
+ __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
+ statistics_bin);
+
+ memset(&pstats, 0, sizeof(pstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->common.tx_ucast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->common.tx_mcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->common.tx_bcast_bytes +=
+ HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->common.tx_ucast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->common.tx_mcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->common.tx_bcast_pkts +=
+ HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->common.tx_err_drop_pkts +=
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+}
+
+static noinline_for_stack void
+__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
+{
+ struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
+
+ memset(&tstats, 0, sizeof(tstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
+
+ p_stats->common.mftag_filter_discards +=
+ HILO_64_REGPAIR(tstats.mftag_filter_discard);
+ p_stats->common.mac_filter_discards +=
+ HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
+ p_stats->common.gft_filter_drop +=
+ HILO_64_REGPAIR(tstats.eth_gft_drop_pkt);
+}
+
+static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len, u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
+}
+
+static noinline_for_stack
+void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
+{
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+
+ __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
+ statistics_bin);
+
+ memset(&ustats, 0, sizeof(ustats));
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->common.rx_ucast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->common.rx_mcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->common.rx_bcast_bytes +=
+ HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
+ u32 *p_addr,
+ u32 *p_len, u16 statistics_bin)
+{
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
+}
+
+static noinline_for_stack void
+__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats, u16 statistics_bin)
+{
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+
+ __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
+ statistics_bin);
+
+ memset(&mstats, 0, sizeof(mstats));
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
+
+ p_stats->common.no_buff_discards +=
+ HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->common.packet_too_big_discard +=
+ HILO_64_REGPAIR(mstats.packet_too_big_discard);
+ p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->common.tpa_coalesced_pkts +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
+ p_stats->common.tpa_coalesced_events +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_events);
+ p_stats->common.tpa_aborts_num +=
+ HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->common.tpa_coalesced_bytes +=
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+}
+
+static noinline_for_stack void
+__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_eth_stats *p_stats)
+{
+ struct qed_eth_stats_common *p_common = &p_stats->common;
+ struct port_stats port_stats;
+ int j;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, stats),
+ sizeof(port_stats));
+
+ p_common->rx_64_byte_packets += port_stats.eth.r64;
+ p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_common->rx_crc_errors += port_stats.eth.rfcs;
+ p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_common->rx_pause_frames += port_stats.eth.rxpf;
+ p_common->rx_pfc_frames += port_stats.eth.rxpp;
+ p_common->rx_align_errors += port_stats.eth.raln;
+ p_common->rx_carrier_errors += port_stats.eth.rfcr;
+ p_common->rx_oversize_packets += port_stats.eth.rovr;
+ p_common->rx_jabbers += port_stats.eth.rjbr;
+ p_common->rx_undersize_packets += port_stats.eth.rund;
+ p_common->rx_fragments += port_stats.eth.rfrg;
+ p_common->tx_64_byte_packets += port_stats.eth.t64;
+ p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_common->tx_pause_frames += port_stats.eth.txpf;
+ p_common->tx_pfc_frames += port_stats.eth.txpp;
+ p_common->rx_mac_bytes += port_stats.eth.rbyte;
+ p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_common->tx_mac_bytes += port_stats.eth.tbyte;
+ p_common->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_common->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_common->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
+ for (j = 0; j < 8; j++) {
+ p_common->brb_truncates += port_stats.brb.brb_truncate[j];
+ p_common->brb_discards += port_stats.brb.brb_discard[j];
+ }
+
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ struct qed_eth_stats_bb *p_bb = &p_stats->bb;
+
+ p_bb->rx_1519_to_1522_byte_packets +=
+ port_stats.eth.u0.bb0.r1522;
+ p_bb->rx_1519_to_2047_byte_packets +=
+ port_stats.eth.u0.bb0.r2047;
+ p_bb->rx_2048_to_4095_byte_packets +=
+ port_stats.eth.u0.bb0.r4095;
+ p_bb->rx_4096_to_9216_byte_packets +=
+ port_stats.eth.u0.bb0.r9216;
+ p_bb->rx_9217_to_16383_byte_packets +=
+ port_stats.eth.u0.bb0.r16383;
+ p_bb->tx_1519_to_2047_byte_packets +=
+ port_stats.eth.u1.bb1.t2047;
+ p_bb->tx_2048_to_4095_byte_packets +=
+ port_stats.eth.u1.bb1.t4095;
+ p_bb->tx_4096_to_9216_byte_packets +=
+ port_stats.eth.u1.bb1.t9216;
+ p_bb->tx_9217_to_16383_byte_packets +=
+ port_stats.eth.u1.bb1.t16383;
+ p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
+ p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
+ } else {
+ struct qed_eth_stats_ah *p_ah = &p_stats->ah;
+
+ p_ah->rx_1519_to_max_byte_packets +=
+ port_stats.eth.u0.ah0.r1519_to_max;
+ p_ah->tx_1519_to_max_byte_packets =
+ port_stats.eth.u1.ah1.t1519_to_max;
+ }
+
+ p_common->link_change_count = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ link_change_count));
+}
+
+static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_eth_stats *stats,
+ u16 statistics_bin, bool b_get_port_stats)
+{
+ __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
+ __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
+
+ if (b_get_port_stats && p_hwfn->mcp_info)
+ __qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
+}
+
+static void _qed_get_vport_stats(struct qed_dev *cdev,
+ struct qed_eth_stats *stats,
+ bool is_atomic)
+{
+ u8 fw_vport = 0;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+ bool b_get_port_stats;
+
+ p_ptt = IS_PF(cdev) ? qed_ptt_acquire_context(p_hwfn, is_atomic)
+ : NULL;
+ if (IS_PF(cdev)) {
+ /* The main vport is at relative index 0 */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
+ }
+
+ if (IS_PF(cdev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn);
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ b_get_port_stats);
+
+out:
+ if (IS_PF(cdev) && p_ptt)
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+}
+
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
+{
+ qed_get_vport_stats_context(cdev, stats, false);
+}
+
+void qed_get_vport_stats_context(struct qed_dev *cdev,
+ struct qed_eth_stats *stats,
+ bool is_atomic)
+{
+ u32 i;
+
+ if (!cdev || cdev->recov_in_prog) {
+ memset(stats, 0, sizeof(*stats));
+ return;
+ }
+
+ _qed_get_vport_stats(cdev, stats, is_atomic);
+
+ if (!cdev->reset_stats)
+ return;
+
+ /* Reduce the statistics baseline */
+ for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++)
+ ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i];
+}
+
+/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
+void qed_reset_vport_stats(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct eth_mstorm_per_queue_stat mstats;
+ struct eth_ustorm_per_queue_stat ustats;
+ struct eth_pstorm_per_queue_stat pstats;
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
+ u32 addr = 0, len = 0;
+
+ if (IS_PF(cdev) && !p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ continue;
+ }
+
+ memset(&mstats, 0, sizeof(mstats));
+ __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
+
+ memset(&ustats, 0, sizeof(ustats));
+ __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
+
+ memset(&pstats, 0, sizeof(pstats));
+ __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
+ qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
+
+ if (IS_PF(cdev))
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ /* PORT statistics are not necessarily reset, so we need to
+ * read and create a baseline for future statistics.
+ * Link change stat is maintained by MFW, return its value as is.
+ */
+ if (!cdev->reset_stats) {
+ DP_INFO(cdev, "Reset stats not allocated\n");
+ } else {
+ _qed_get_vport_stats(cdev, cdev->reset_stats, false);
+ cdev->reset_stats->common.link_change_count = 0;
+ }
+}
+
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
+{
+ if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+ return GFT_PROFILE_TYPE_4_TUPLE;
+ if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+ return GFT_PROFILE_TYPE_IP_DST_ADDR;
+ if (mode == QED_FILTER_CONFIG_MODE_IP_SRC)
+ return GFT_PROFILE_TYPE_IP_SRC_ADDR;
+ return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params)
+{
+ if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits))
+ return;
+
+ if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+ qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_cfg_params->tcp,
+ p_cfg_params->udp,
+ p_cfg_params->ipv4,
+ p_cfg_params->ipv6,
+ qed_arfs_mode_to_hsi(p_cfg_params->mode));
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
+ p_cfg_params->tcp ? "Enable" : "Disable",
+ p_cfg_params->udp ? "Enable" : "Disable",
+ p_cfg_params->ipv4 ? "Enable" : "Disable",
+ p_cfg_params->ipv6 ? "Enable" : "Disable",
+ (u32)p_cfg_params->mode);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+ qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+}
+
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_cb,
+ struct qed_ntuple_filter_params *p_params)
+{
+ struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 abs_rx_q_id = 0;
+ u8 abs_vport_id = 0;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ if (p_cb) {
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+ init_data.p_comp_data = p_cb;
+ } else {
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ }
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_update_gft;
+
+ DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+ p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
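+ /* Drop filters are steered to the dedicated GFT trashcan vport */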
+ if (p_params->b_is_drop) {
+ p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT);
+ } else {
+ rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
+ if (rc)
+ goto err;
+
+ if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+ rc = qed_fw_l2_queue(p_hwfn, p_params->qid,
+ &abs_rx_q_id);
+ if (rc)
+ goto err;
+
+ p_ramrod->rx_qid_valid = 1;
+ p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+ }
+
+ p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+ }
+
+ p_ramrod->flow_id_valid = 0;
+ p_ramrod->flow_id = 0;
+ p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+ : GFT_DELETE_FILTER;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
+ abs_vport_id, abs_rx_q_id,
+ p_params->b_is_add ? "Adding" : "Removing",
+ (u64)p_params->addr, p_params->length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
+}
+
+int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_rx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ int rc;
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
+ CAU_SB_ENTRY_TIMER_RES0);
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
+ coalesce = qed_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return -EINVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
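+ /* Scale the stored timeset back up by the CAU timer resolution */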
+ *p_rx_coal = (u16)(coalesce << timer_res);
+
+ return 0;
+}
+
+int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_tx_coal)
+{
+ u32 coalesce, address, is_valid;
+ struct cau_sb_entry sb_entry;
+ u8 timer_res;
+ int rc;
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ p_cid->sb_igu_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, NULL);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ timer_res = GET_FIELD(le32_to_cpu(sb_entry.params),
+ CAU_SB_ENTRY_TIMER_RES1);
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
+ coalesce = qed_rd(p_hwfn, p_ptt, address);
+
+ is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
+ if (!is_valid)
+ return -EINVAL;
+
+ coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
+ *p_tx_coal = (u16)(coalesce << timer_res);
+
+ return 0;
+}
+
+int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle)
+{
+ struct qed_queue_cid *p_cid = handle;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
+
+ return rc;
+ }
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ if (p_cid->b_is_rx) {
+ rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc)
+ goto out;
+ } else {
+ rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
+ if (rc)
+ goto out;
+ }
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static int qed_fill_eth_dev_info(struct qed_dev *cdev,
+ struct qed_dev_eth_info *info)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int i;
+
+ memset(info, 0, sizeof(*info));
+
+ if (IS_PF(cdev)) {
+ int max_vf_vlan_filters = 0;
+ int max_vf_mac_filters = 0;
+
+ info->num_tc = p_hwfn->hw_info.num_hw_tc;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ u16 num_queues = 0;
+
+ /* Since the feature controls only queue-zones,
+ * make sure we have the contexts [rx, xdp, tcs] to
+ * match.
+ */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ u16 l2_queues = (u16)FEAT_NUM(hwfn,
+ QED_PF_L2_QUE);
+ u16 cids;
+
+ cids = hwfn->pf_params.eth_pf_params.num_cons;
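+ /* Each queue needs an Rx, an XDP-Tx and one Tx context per TC */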
+ cids /= (2 + info->num_tc);
+ num_queues += min_t(u16, l2_queues, cids);
+ }
+
+ /* queues might theoretically be >256, but interrupts'
+ * upper-limit guarantees that it would fit in a u8.
+ */
+ if (cdev->int_params.fp_msix_cnt) {
+ u8 irqs = cdev->int_params.fp_msix_cnt;
+
+ info->num_queues = (u8)min_t(u16,
+ num_queues, irqs);
+ }
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ if (IS_QED_SRIOV(cdev)) {
+ max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
+ QED_ETH_VF_NUM_VLAN_FILTERS;
+ max_vf_mac_filters = cdev->p_iov_info->total_vfs *
+ QED_ETH_VF_NUM_MAC_FILTERS;
+ }
+ info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+ QED_VLAN) -
+ max_vf_vlan_filters;
+ info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev),
+ QED_MAC) -
+ max_vf_mac_filters;
+
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
+
+ info->xdp_supported = true;
+ } else {
+ u16 total_cids = 0;
+
+ info->num_tc = 1;
+
+ /* Determine queues & XDP support */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u8 queues, cids;
+
+ qed_vf_get_num_cids(p_hwfn, &cids);
+ qed_vf_get_num_rxqs(p_hwfn, &queues);
+ info->num_queues += queues;
+ total_cids += cids;
+ }
+
+ /* Enable VF XDP in case the PF guarantees sufficient connections */
+ if (total_cids >= info->num_queues * 3)
+ info->xdp_supported = true;
+
+ qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+ (u8 *)&info->num_vlan_filters);
+ qed_vf_get_num_mac_filters(&cdev->hwfns[0],
+ (u8 *)&info->num_mac_filters);
+ qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+
+ info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi;
+ }
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ if (IS_VF(cdev))
+ eth_zero_addr(info->common.hw_mac);
+
+ return 0;
+}
+
+static void qed_register_eth_ops(struct qed_dev *cdev,
+ struct qed_eth_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+
+ /* For VF, we start bulletin reading */
+ if (IS_VF(cdev))
+ qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
+{
+ if (IS_PF(cdev))
+ return true;
+
+ return qed_vf_check_mac(&cdev->hwfns[0], mac);
+}
+
+static int qed_start_vport(struct qed_dev *cdev,
+ struct qed_start_vport_params *params)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_sp_vport_start_params start = { 0 };
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
+ QED_TPA_MODE_NONE;
+ start.remove_inner_vlan = params->remove_inner_vlan;
+ start.only_untagged = true; /* untagged only */
+ start.drop_ttl0 = params->drop_ttl0;
+ start.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ start.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ start.handle_ptp_pkts = params->handle_ptp_pkts;
+ start.vport_id = params->vport_id;
+ start.max_buffers_per_cqe = 16;
+ start.mtu = params->mtu;
+
+ rc = qed_sp_vport_start(p_hwfn, &start);
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT\n");
+ return rc;
+ }
+
+ rc = qed_hw_start_fastpath(p_hwfn);
+ if (rc) {
+ DP_ERR(cdev, "Failed to start VPORT fastpath\n");
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started V-PORT %d with MTU %d\n",
+ start.vport_id, start.mtu);
+ }
+
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
+
+ return 0;
+}
+
+static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id)
+{
+ int rc, i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_vport_stop(p_hwfn,
+ p_hwfn->hw_info.opaque_fid, vport_id);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop VPORT\n");
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int qed_update_vport_rss(struct qed_dev *cdev,
+ struct qed_update_vport_rss_params *input,
+ struct qed_rss_params *rss)
+{
+ int i, fn;
+
+ /* Update configuration with what's correct regardless of CMT */
+ rss->update_rss_config = 1;
+ rss->rss_enable = 1;
+ rss->update_rss_capabilities = 1;
+ rss->update_rss_ind_table = 1;
+ rss->update_rss_key = 1;
+ rss->rss_caps = input->rss_caps;
+ memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32));
+
+ /* In the regular scenario we would simply take the input handlers.
+ * But in CMT we have to split the handlers according to the
+ * engine they were configured on, and then decide whether RSS is
+ * really required, since two queues on CMT don't require RSS.
+ */
+ if (cdev->num_hwfns == 1) {
+ memcpy(rss->rss_ind_table,
+ input->rss_ind_table,
+ QED_RSS_IND_TABLE_SIZE * sizeof(void *));
+ rss->rss_table_size_log = 7;
+ return 0;
+ }
+
+ /* Start by copying the non-specific information to the 2nd copy */
+ memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params));
+
+ /* CMT should be round-robin */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ struct qed_queue_cid *cid = input->rss_ind_table[i];
+ struct qed_rss_params *t_rss;
+
+ if (cid->p_owner == QED_LEADING_HWFN(cdev))
+ t_rss = &rss[0];
+ else
+ t_rss = &rss[1];
+
+ t_rss->rss_ind_table[i / cdev->num_hwfns] = cid;
+ }
+
+ /* Make sure RSS is actually required */
+ for_each_hwfn(cdev, fn) {
+ for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) {
+ if (rss[fn].rss_ind_table[i] !=
+ rss[fn].rss_ind_table[0])
+ break;
+ }
+ if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) {
+ DP_VERBOSE(cdev, NETIF_MSG_IFUP,
+ "CMT - 1 queue per-hwfn; Disabling RSS\n");
+ return -EINVAL;
+ }
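+ /* In CMT each engine only sees its half of the indirection table */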
+ rss[fn].rss_table_size_log = 6;
+ }
+
+ return 0;
+}
+
+static int qed_update_vport(struct qed_dev *cdev,
+ struct qed_update_vport_params *params)
+{
+ struct qed_sp_vport_update_params sp_params;
+ struct qed_rss_params *rss;
+ int rc = 0, i;
+
+ if (!cdev)
+ return -ENODEV;
+
+ rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns));
+ if (!rss)
+ return -ENOMEM;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+
+ /* Translate protocol params into sp params */
+ sp_params.vport_id = params->vport_id;
+ sp_params.update_vport_active_rx_flg = params->update_vport_active_flg;
+ sp_params.update_vport_active_tx_flg = params->update_vport_active_flg;
+ sp_params.vport_active_rx_flg = params->vport_active_flg;
+ sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
+ sp_params.accept_any_vlan = params->accept_any_vlan;
+ sp_params.update_accept_any_vlan_flg =
+ params->update_accept_any_vlan_flg;
+
+ /* Prepare the RSS configuration */
+ if (params->update_rss_flg)
+ if (qed_update_vport_rss(cdev, &params->rss_params, rss))
+ params->update_rss_flg = 0;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (params->update_rss_flg)
+ sp_params.rss_params = &rss[i];
+
+ sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ rc = qed_sp_vport_update(p_hwfn, &sp_params,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_ERR(cdev, "Failed to update VPORT\n");
+ goto out;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Updated V-PORT %d: active_flag %d [update %d]\n",
+ params->vport_id, params->vport_active_flg,
+ params->update_vport_active_flg);
+ }
+
+out:
+ vfree(rss);
+ return rc;
+}
+
+static int qed_start_rxq(struct qed_dev *cdev,
+ u8 rss_num,
+ struct qed_queue_start_common_params *p_params,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size,
+ struct qed_rxq_start_ret_params *ret_params)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = rss_num % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
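+ /* In CMT the global queue index is spread round-robin over the engines,
+ * so divide it down to the per-engine index.
+ */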
+ p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
+ rc = qed_eth_rx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, ret_params);
+ if (rc) {
+ DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->p_sb->igu_sb_id);
+
+ return 0;
+}
+
+static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle)
+{
+ int rc, hwfn_index;
+ struct qed_hwfn *p_hwfn;
+
+ hwfn_index = rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_start_txq(struct qed_dev *cdev,
+ u8 rss_num,
+ struct qed_queue_start_common_params *p_params,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ struct qed_txq_start_ret_params *ret_params)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = rss_num % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+ p_params->queue_id = p_params->queue_id / cdev->num_hwfns;
+ p_params->stats_id = p_params->vport_id;
+
+ rc = qed_eth_tx_queue_start(p_hwfn,
+ p_hwfn->hw_info.opaque_fid,
+ p_params, p_params->tc,
+ pbl_addr, pbl_size, ret_params);
+
+ if (rc) {
+ DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP),
+ "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n",
+ p_params->queue_id, rss_num, p_params->vport_id,
+ p_params->p_sb->igu_sb_id);
+
+ return 0;
+}
+
+#define QED_HW_STOP_RETRY_LIMIT (10)
+static int qed_fastpath_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ rc = qed_hw_stop_fastpath(cdev);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop Fastpath\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle)
+{
+ struct qed_hwfn *p_hwfn;
+ int rc, hwfn_index;
+
+ hwfn_index = rss_id % cdev->num_hwfns;
+ p_hwfn = &cdev->hwfns[hwfn_index];
+
+ rc = qed_eth_tx_queue_stop(p_hwfn, handle);
+ if (rc) {
+ DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qed_tunn_configure(struct qed_dev *cdev,
+ struct qed_tunn_params *tunn_params)
+{
+ struct qed_tunnel_info tunn_info;
+ int i, rc;
+
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ if (tunn_params->update_vxlan_port) {
+ tunn_info.vxlan_port.b_update_port = true;
+ tunn_info.vxlan_port.port = tunn_params->vxlan_port;
+ }
+
+ if (tunn_params->update_geneve_port) {
+ tunn_info.geneve_port.b_update_port = true;
+ tunn_info.geneve_port.port = tunn_params->geneve_port;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+ struct qed_tunnel_info *tun;
+
+ tun = &hwfn->cdev->tunnel;
+ if (IS_PF(cdev)) {
+ p_ptt = qed_ptt_acquire(hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+ } else {
+ p_ptt = NULL;
+ }
+
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
+ return rc;
+ }
+
+ if (IS_PF_SRIOV(hwfn)) {
+ u16 vxlan_port, geneve_port;
+ int j;
+
+ vxlan_port = tun->vxlan_port.port;
+ geneve_port = tun->geneve_port.port;
+
+ qed_for_each_vf(hwfn, j) {
+ qed_iov_bulletin_set_udp_ports(hwfn, j,
+ vxlan_port,
+ geneve_port);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ if (IS_PF(cdev))
+ qed_ptt_release(hwfn, p_ptt);
+ }
+
+ return 0;
+}
+
+static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
+ enum qed_filter_rx_mode_type type)
+{
+ struct qed_filter_accept_flags accept_flags;
+
+ memset(&accept_flags, 0, sizeof(accept_flags));
+
+ accept_flags.update_rx_mode_config = 1;
+ accept_flags.update_tx_mode_config = 1;
+ accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+ accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_BCAST;
+
+ if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
+ accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+ accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
+ } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
+ accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ }
+
+ return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false,
+ QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_ucast(struct qed_dev *cdev,
+ struct qed_filter_ucast_params *params)
+{
+ struct qed_filter_ucast ucast;
+
+ if (!params->vlan_valid && !params->mac_valid) {
+ DP_NOTICE(cdev,
+ "Tried configuring a unicast filter, but both MAC and VLAN are not set\n");
+ return -EINVAL;
+ }
+
+ memset(&ucast, 0, sizeof(ucast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ ucast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ ucast.opcode = QED_FILTER_REMOVE;
+ break;
+ case QED_FILTER_XCAST_TYPE_REPLACE:
+ ucast.opcode = QED_FILTER_REPLACE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown unicast filter type %d\n",
+ params->type);
+ }
+
+ if (params->vlan_valid && params->mac_valid) {
+ ucast.type = QED_FILTER_MAC_VLAN;
+ ether_addr_copy(ucast.mac, params->mac);
+ ucast.vlan = params->vlan;
+ } else if (params->mac_valid) {
+ ucast.type = QED_FILTER_MAC;
+ ether_addr_copy(ucast.mac, params->mac);
+ } else {
+ ucast.type = QED_FILTER_VLAN;
+ ucast.vlan = params->vlan;
+ }
+
+ ucast.is_rx_filter = true;
+ ucast.is_tx_filter = true;
+
+ return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_filter_mcast(struct qed_dev *cdev,
+ struct qed_filter_mcast_params *params)
+{
+ struct qed_filter_mcast mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ switch (params->type) {
+ case QED_FILTER_XCAST_TYPE_ADD:
+ mcast.opcode = QED_FILTER_ADD;
+ break;
+ case QED_FILTER_XCAST_TYPE_DEL:
+ mcast.opcode = QED_FILTER_REMOVE;
+ break;
+ default:
+ DP_NOTICE(cdev, "Unknown multicast filter type %d\n",
+ params->type);
+ }
+
+ mcast.num_mc_addrs = params->num;
+ for (i = 0; i < mcast.num_mc_addrs; i++)
+ ether_addr_copy(mcast.mac[i], params->mac[i]);
+
+ return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
+}
+
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+ enum qed_filter_config_mode mode)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_arfs_config_params arfs_config_params;
+
+ memset(&arfs_config_params, 0, sizeof(arfs_config_params));
+ arfs_config_params.tcp = true;
+ arfs_config_params.udp = true;
+ arfs_config_params.ipv4 = true;
+ arfs_config_params.ipv6 = true;
+ arfs_config_params.mode = mode;
+ qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
+ &arfs_config_params);
+ return 0;
+}
+
+static void
+qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
+ void *dev = p_hwfn->cdev->ops_cookie;
+
+ op->arfs_filter_op(dev, cookie, fw_return_code);
+}
+
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+ void *cookie,
+ struct qed_ntuple_filter_params *params)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_spq_comp_cb cb;
+ int rc = -EINVAL;
+
+ cb.function = qed_arfs_sp_response_handler;
+ cb.cookie = cookie;
+
+ if (params->b_is_vf) {
+ if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+ false)) {
+ DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
+ params->vf_id);
+ return rc;
+ }
+
+ params->vport_id = params->vf_id + 1;
+ params->qid = QED_RFS_NTUPLE_QID_RSS;
+ }
+
+ rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to issue a-RFS filter configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Successfully issued a-RFS filter configuration\n");
+
+ return rc;
+}
+
+static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
+{
+ struct qed_queue_cid *p_cid = handle;
+ struct qed_hwfn *p_hwfn;
+ int rc;
+
+ p_hwfn = p_cid->p_owner;
+ rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
+ if (rc)
+ DP_VERBOSE(cdev, QED_MSG_DEBUG,
+ "Unable to read queue coalescing\n");
+
+ return rc;
+}
+
+static int qed_fp_cqe_completion(struct qed_dev *dev,
+ u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
+{
+ return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns],
+ cqe);
+}
+
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac)
+{
+ int i, ret;
+
+ if (IS_PF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct qed_eth_ops qed_eth_ops_pass = {
+ .common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+ .iov = &qed_iov_ops_pass,
+#endif
+#ifdef CONFIG_DCB
+ .dcb = &qed_dcbnl_ops_pass,
+#endif
+ .ptp = &qed_ptp_ops_pass,
+ .fill_dev_info = &qed_fill_eth_dev_info,
+ .register_ops = &qed_register_eth_ops,
+ .check_mac = &qed_check_mac,
+ .vport_start = &qed_start_vport,
+ .vport_stop = &qed_stop_vport,
+ .vport_update = &qed_update_vport,
+ .q_rx_start = &qed_start_rxq,
+ .q_rx_stop = &qed_stop_rxq,
+ .q_tx_start = &qed_start_txq,
+ .q_tx_stop = &qed_stop_txq,
+ .filter_config_rx_mode = &qed_configure_filter_rx_mode,
+ .filter_config_ucast = &qed_configure_filter_ucast,
+ .filter_config_mcast = &qed_configure_filter_mcast,
+ .fastpath_stop = &qed_fastpath_stop,
+ .eth_cqe_completion = &qed_fp_cqe_completion,
+ .get_vport_stats = &qed_get_vport_stats,
+ .tunn_config = &qed_tunn_configure,
+ .ntuple_filter_config = &qed_ntuple_arfs_filter_config,
+ .configure_arfs_searcher = &qed_configure_arfs_searcher,
+ .get_coalesce = &qed_get_coalesce,
+ .req_bulletin_update_mac = &qed_req_bulletin_update_mac,
+};
+
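+/* A rough sketch of how an upper-layer driver (e.g. qede) is expected to use
+ * these ops; the local variable names are illustrative only:
+ *
+ *	const struct qed_eth_ops *ops = qed_get_eth_ops();
+ *
+ *	ops->fill_dev_info(cdev, &eth_info);
+ *	ops->vport_start(cdev, &vport_params);
+ *	ops->q_rx_start(cdev, rss_num, &qparams, bd_max_bytes,
+ *			bd_chain_phys, cqe_pbl_addr, cqe_pbl_size, &ret);
+ */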
+const struct qed_eth_ops *qed_get_eth_ops(void)
+{
+ return &qed_eth_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_eth_ops);
+
+void qed_put_eth_ops(void)
+{
+ /* TODO - reference count for module? */
+}
+EXPORT_SYMBOL(qed_put_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644
index 000000000..2d2f82c78
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+
+ /* Indirection table consists of Rx queue handles */
+ void *rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add a new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+/**
+ * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
+ *
+ * @p_hwfn: HW device data.
+ * @p_rxq: Handle of the queue to close.
+ * @eq_completion_only: If true, the completion is posted on the EQ only;
+ *                      if false, it is posted on the EQ when the p_hwfn
+ *                      opaque differs from the RXQ opaque, and on the CQ
+ *                      otherwise.
+ * @cqe_completion: If true, the completion is also received on the CQ.
+ *
+ * Return: Int.
+ */
+int
+qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ void *p_rxq,
+ bool eq_completion_only, bool cqe_completion);
+
+/**
+ * qed_eth_tx_queue_stop(): Closes a Tx queue.
+ *
+ * @p_hwfn: HW device data.
+ * @p_txq: handle to Tx queue needed to be closed.
+ *
+ * Return: Int.
+ */
+int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
+
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool tx_switching;
+ bool handle_ptp_pkts;
+ bool only_untagged;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+ bool check_mac;
+ bool check_ethtype;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+#define QED_ACCEPT_ANY_VNI 0x40
+};
+
+struct qed_arfs_config_params {
+ bool tcp;
+ bool udp;
+ bool ipv4;
+ bool ipv6;
+ enum qed_filter_config_mode mode;
+};
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ u32 bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+ struct qed_sge_tpa_params *sge_tpa_params;
+ u8 update_ctl_frame_check;
+ u8 mac_chk_en;
+ u8 ethtype_chk_en;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * qed_sp_vport_stop(): This ramrod closes a VPort after all its
+ * RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @p_hwfn: HW device data.
+ * @opaque_fid: Opaque FID
+ * @vport_id: VPort ID.
+ *
+ * Return: Int.
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
+ * It is used for setting the active state
+ * of the queue and updating the TPA and
+ * SGE parameters.
+ *
+ * @p_hwfn: HW device data.
+ * @pp_rxq_handlers: An array of queue handlers to be updated.
+ * @num_rxqs: number of queues to update.
+ * @complete_cqe_flg: Post completion to the CQE Ring if set.
+ * @complete_event_flg: Post completion to the Event Ring if set.
+ * @comp_mode: Comp mode.
+ * @p_comp_data: Pointer Comp data.
+ *
+ * Return: Int.
+ *
+ * Note: At the moment this is only used by non-Linux VFs.
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ void **pp_rxq_handlers,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * qed_get_vport_stats(): Fills provided statistics
+ * struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
+ */
+void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
+
+/**
+ * qed_get_vport_stats_context(): Fills provided statistics
+ * struct with statistics.
+ *
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ * @is_atomic: Hint from the caller indicating whether the function may sleep.
+ *
+ * Context: The function should not sleep in case is_atomic == true.
+ * Return: Void.
+ */
+void qed_get_vport_stats_context(struct qed_dev *cdev,
+ struct qed_eth_stats *stats,
+ bool is_atomic);
+
+void qed_reset_vport_stats(struct qed_dev *cdev);
+
+/**
+ * qed_arfs_mode_configure(): Enable or disable RFS mode.
+ * To enable RFS mode, at least one of tcp or udp and
+ * at least one of ipv4 or ipv6 must be set to true.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_cfg_params: arfs mode configuration parameters.
+ *
+ * Return: Void.
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
+ * or remove an aRFS HW filter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_cb: Used for QED_SPQ_MODE_CB, where the client initializes it with
+ * a cookie and a callback function address; when not using this
+ * mode, the client must pass NULL.
+ * @p_params: Pointer to params.
+ *
+ * Return: Int.
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+ struct qed_spq_comp_cb *p_cb,
+ struct qed_ntuple_filter_params *p_params);
+
+#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
+#define QED_QUEUE_CID_SELF (0xff)
+
+/* Almost identical to the qed_queue_start_common_params,
+ * but here we maintain the SB index in IGU CAM.
+ */
+struct qed_queue_cid_params {
+ u8 vport_id;
+ u16 queue_id;
+ u8 stats_id;
+};
+
+/* Additional parameters required for initialization of the queue_cid
+ * and are relevant only for a PF initializing one for its VFs.
+ */
+struct qed_queue_cid_vf_params {
+ /* Should match the VF's relative index */
+ u8 vfid;
+
+ /* 0-based queue index. Should reflect the relative qzone the
+ * VF thinks is associated with it [in its range].
+ */
+ u8 vf_qid;
+
+ /* Indicates a VF is legacy, making it differ in several things:
+ * - Producers would be placed in a different place.
+ * - Makes assumptions regarding the CIDs.
+ */
+ u8 vf_legacy;
+
+ u8 qid_usage_idx;
+};
+
+struct qed_queue_cid {
+ /* For stats-id, the `rel' is actually absolute as well */
+ struct qed_queue_cid_params rel;
+ struct qed_queue_cid_params abs;
+
+ /* These have no 'relative' meaning */
+ u16 sb_igu_id;
+ u8 sb_idx;
+
+ u32 cid;
+ u16 opaque_fid;
+
+ bool b_is_rx;
+
+ /* VFs queues are mapped differently, so we need to know the
+ * relative queue associated with them [0-based].
+ * Notice this is relevant on the *PF* queue-cid of its VF's queues,
+ * and not on the VF itself.
+ */
+ u8 vfid;
+ u8 vf_qid;
+
+ /* We need an additional index to differentiate between queues opened
+ * for same queue-zone, as VFs would have to communicate the info
+ * to the PF [otherwise PF has no way to differentiate].
+ */
+ u8 qid_usage_idx;
+
+ u8 vf_legacy;
+#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0))
+#define QED_QCID_LEGACY_VF_CID (BIT(1))
+
+ struct qed_hwfn *p_owner;
+};
+
+int qed_l2_alloc(struct qed_hwfn *p_hwfn);
+void qed_l2_setup(struct qed_hwfn *p_hwfn);
+void qed_l2_free(struct qed_hwfn *p_hwfn);
+
+void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid);
+
+struct qed_queue_cid *
+qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_queue_start_common_params *p_params,
+ bool b_is_rx,
+ struct qed_queue_cid_vf_params *p_vf_params);
+
+int
+qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+/**
+ * qed_eth_rxq_start_ramrod(): Starts an Rx queue whose queue_cid is
+ * already prepared.
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer CID.
+ * @bd_max_bytes: Max bytes.
+ * @bd_chain_phys_addr: Chain physical address.
+ * @cqe_pbl_addr: PBL address.
+ * @cqe_pbl_size: PBL size.
+ *
+ * Return: Int.
+ */
+int
+qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+/**
+ * qed_eth_txq_start_ramrod(): Starts a Tx queue whose queue_cid is
+ * already prepared.
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pq_id: Parameters for choosing the PQ for this Tx queue.
+ *
+ * Return: Int.
+ */
+int
+qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
+
+u8 qed_mcast_bin_from_mac(u8 *mac);
+
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 coalesce, struct qed_queue_cid *p_cid);
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 coalesce, struct qed_queue_cid *p_cid);
+
+int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_hw_coal);
+
+int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_queue_cid *p_cid, u16 *p_hw_coal);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
new file mode 100644
index 000000000..810df65cd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -0,0 +1,2823 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/stddef.h>
+#include <linux/workqueue.h>
+#include <net/ipv6.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_ooo.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_rdma.h"
+
+#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered)
+#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered)
+
+#define QED_LL2_TX_SIZE (256)
+#define QED_LL2_RX_SIZE (4096)
+
+#define QED_LL2_INVALID_STATS_ID 0xff
+
+struct qed_cb_ll2_info {
+ int rx_cnt;
+ u32 rx_size;
+ u8 handle;
+
+ /* Lock protecting LL2 buffer lists in sleepless context */
+ spinlock_t lock;
+ struct list_head list;
+
+ const struct qed_ll2_cb_ops *cbs;
+ void *cb_cookie;
+};
+
+struct qed_ll2_buffer {
+ struct list_head list;
+ void *data;
+ dma_addr_t phys_addr;
+};
+
+static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
+ u8 ll2_queue_type, u8 qid)
+{
+ u8 stats_id;
+
+ /* For legacy (RAM based) queues, the stats_id will be set as the
+ * queue_id. Otherwise (context based queue), it will be set to
+ * the "abs_pf_id" offset from the end of the RAM based queue IDs.
+ * If the final value exceeds the total counters amount, return
+ * INVALID value to indicate that the stats for this connection should
+ * be disabled.
+ */
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ stats_id = qid;
+ else
+ stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
+
+ if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
+ return stats_id;
+ else
+ return QED_LL2_INVALID_STATS_ID;
+}
+
+static void qed_ll2b_complete_tx_packet(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t first_frag_addr,
+ bool b_last_fragment,
+ bool b_last_packet)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct sk_buff *skb = cookie;
+
+ /* All we need to do is release the mapping */
+ dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb)
+ cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb,
+ b_last_fragment);
+
+ dev_kfree_skb_any(skb);
+}
+
+static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
+ u8 **data, dma_addr_t *phys_addr)
+{
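+ /* Reserve NET_SKB_PAD headroom and skb_shared_info tail room so the
+ * buffer can later be handed to build_skb() on Rx completion.
+ */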
+ size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ *data = kmalloc(size, GFP_ATOMIC);
+ if (!(*data)) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
+ return -ENOMEM;
+ }
+
+ *phys_addr = dma_map_single(&cdev->pdev->dev,
+ ((*data) + NET_SKB_PAD),
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) {
+ DP_INFO(cdev, "Failed to map LL2 buffer data\n");
+ kfree((*data));
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
+ struct qed_ll2_buffer *buffer)
+{
+ spin_lock_bh(&cdev->ll2->lock);
+
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+
+ cdev->ll2->rx_cnt--;
+ if (!cdev->ll2->rx_cnt)
+ DP_INFO(cdev, "All LL2 entries were removed\n");
+
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ return 0;
+}
+
+static void qed_ll2_kill_buffers(struct qed_dev *cdev)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+static void qed_ll2b_complete_rx_packet(void *cxt,
+ struct qed_ll2_comp_rx_data *data)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_buffer *buffer = data->cookie;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ dma_addr_t new_phys_addr;
+ struct sk_buff *skb;
+ bool reuse = false;
+ int rc = -EINVAL;
+ u8 *new_data;
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA),
+ "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n",
+ (u64)data->rx_buf_addr,
+ data->u.placement_offset,
+ data->length.packet_length,
+ data->parse_flags,
+ data->vlan, data->opaque_data_0, data->opaque_data_1);
+
+ if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) {
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ buffer->data, data->length.packet_length, false);
+ }
+
+ /* Determine if data is valid */
+ if (data->length.packet_length < ETH_HLEN)
+ reuse = true;
+
+ /* Allocate a replacement for buffer; Reuse upon failure */
+ if (!reuse)
+ rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data,
+ &new_phys_addr);
+
+ /* If the buffer must be reused or no replacement could be allocated, repost it */
+ if (rc)
+ goto out_post;
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+
+ skb = build_skb(buffer->data, 0);
+ if (!skb) {
+ DP_INFO(cdev, "Failed to build SKB\n");
+ kfree(buffer->data);
+ goto out_post1;
+ }
+
+ data->u.placement_offset += NET_SKB_PAD;
+ skb_reserve(skb, data->u.placement_offset);
+ skb_put(skb, data->length.packet_length);
+ skb_checksum_none_assert(skb);
+
+ /* Get partial Ethernet information instead of eth_type_trans(),
+ * since we don't have an associated net_device.
+ */
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ /* Pass SKB onward */
+ if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) {
+ if (data->vlan)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ data->vlan);
+ cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+ data->opaque_data_0,
+ data->opaque_data_1);
+ } else {
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+ QED_MSG_LL2 | QED_MSG_STORAGE),
+ "Dropping the packet\n");
+ kfree(buffer->data);
+ }
+
+out_post1:
+ /* Update Buffer information and update FW producer */
+ buffer->data = new_data;
+ buffer->phys_addr = new_phys_addr;
+
+out_post:
+ rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+ if (rc)
+ qed_ll2_dealloc_buffer(cdev, buffer);
+}
+
+static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle,
+ bool b_lock,
+ bool b_only_active)
+{
+ struct qed_ll2_info *p_ll2_conn, *p_ret = NULL;
+
+ if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS)
+ return NULL;
+
+ if (!p_hwfn->p_ll2_info)
+ return NULL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ if (b_only_active) {
+ if (b_lock)
+ mutex_lock(&p_ll2_conn->mutex);
+ if (p_ll2_conn->b_active)
+ p_ret = p_ll2_conn;
+ if (b_lock)
+ mutex_unlock(&p_ll2_conn->mutex);
+ } else {
+ p_ret = p_ll2_conn;
+ }
+
+ return p_ret;
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true);
+}
+
+static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn
+ *p_hwfn,
+ u8 connection_handle)
+{
+ return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false);
+}
+
+static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ bool b_last_packet = false, b_last_frag = false;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_tx_queue *p_tx;
+ unsigned long flags = 0;
+ dma_addr_t tx_frag;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_tx = &p_ll2_conn->tx_queue;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ while (!list_empty(&p_tx->active_descq)) {
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_del(&p_pkt->list_entry);
+ b_last_packet = list_empty(&p_tx->active_descq);
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
+ struct qed_ooo_buffer *p_buffer;
+
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer);
+ } else {
+ p_tx->cur_completing_packet = *p_pkt;
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag =
+ p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ tx_frag = p_pkt->bds_set[0].tx_frag;
+ p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ tx_frag,
+ b_last_frag,
+ b_last_packet);
+ }
+ spin_lock_irqsave(&p_tx->lock, flags);
+ }
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+}
+
+static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0;
+ struct qed_ll2_tx_packet *p_pkt;
+ bool b_last_frag = false;
+ unsigned long flags;
+ int rc = -EINVAL;
+
+ if (!p_ll2_conn)
+ return rc;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (p_tx->b_completing_packet) {
+ rc = -EBUSY;
+ goto out;
+ }
+
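+ /* The 16-bit difference between the firmware Tx consumer and the
+ * driver's bds_idx is the number of BDs completed since the last pass.
+ */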
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ goto out;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ goto out;
+
+ p_tx->b_completing_packet = true;
+ p_tx->cur_completing_packet = *p_pkt;
+ num_bds_in_packet = p_pkt->bd_used;
+ list_del(&p_pkt->list_entry);
+
+ if (unlikely(num_bds < num_bds_in_packet)) {
+ DP_NOTICE(p_hwfn,
+ "Rest of BDs does not cover whole packet\n");
+ goto out;
+ }
+
+ num_bds -= num_bds_in_packet;
+ p_tx->bds_idx += num_bds_in_packet;
+ while (num_bds_in_packet--)
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_tx->cur_completing_bd_idx = 1;
+ b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+
+ p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ p_pkt->cookie,
+ p_pkt->bds_set[0].tx_frag,
+ b_last_frag, !num_bds);
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ }
+
+ p_tx->b_completing_packet = false;
+ rc = 0;
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
+static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
+ union core_rx_cqe_union *p_cqe,
+ struct qed_ll2_comp_rx_data *data)
+{
+ data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags);
+ data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length);
+ data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan);
+ data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
+ data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
+ data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+ data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+ data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
+}
+
+static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
+ union core_rx_cqe_union *p_cqe,
+ struct qed_ll2_comp_rx_data *data)
+{
+ data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags);
+ data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags);
+ data->length.packet_length =
+ le16_to_cpu(p_cqe->rx_cqe_fp.packet_length);
+ data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan);
+ data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]);
+ data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]);
+ data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset;
+}
+
+static int
+qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long *p_lock_flags)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_slow_path_cqe *sp_cqe;
+
+ sp_cqe = &p_cqe->rx_cqe_sp;
+ if (sp_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) {
+ DP_NOTICE(p_hwfn,
+ "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n",
+ sp_cqe->ramrod_cmd_id);
+ return -EINVAL;
+ }
+
+ if (!p_ll2_conn->cbs.slowpath_cb) {
+ DP_NOTICE(p_hwfn,
+ "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n");
+ return -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+
+ p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ le32_to_cpu(sp_cqe->opaque_data.data[0]),
+ le32_to_cpu(sp_cqe->opaque_data.data[1]));
+
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+ return 0;
+}
+
+static int
+qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ union core_rx_cqe_union *p_cqe,
+ unsigned long *p_lock_flags, bool b_last_cqe)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_comp_rx_data data;
+
+ if (!list_empty(&p_rx->active_descq))
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (unlikely(!p_pkt)) {
+ DP_NOTICE(p_hwfn,
+ "[%d] LL2 Rx completion but active_descq is empty\n",
+ p_ll2_conn->input.conn_type);
+
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+
+ if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR)
+ qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
+ else
+ qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
+ if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
+ DP_NOTICE(p_hwfn,
+ "Mismatch between active_descq and the LL2 Rx chain\n");
+
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ data.connection_handle = p_ll2_conn->my_id;
+ data.cookie = p_pkt->cookie;
+ data.rx_buf_addr = p_pkt->rx_buf_addr;
+ data.b_last_packet = b_last_cqe;
+
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
+ p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data);
+
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
+
+ return 0;
+}
+
+static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ unsigned long flags = 0;
+ int rc = 0;
+
+ if (!p_ll2_conn)
+ return rc;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return 0;
+ }
+
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ while (cq_new_idx != cq_old_idx) {
+ bool b_last_cqe = (cq_new_idx == cq_old_idx);
+
+ cqe =
+ (union core_rx_cqe_union *)
+ qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "LL2 [sw. cons %04x, fw. at %04x] - Got Packet of type %02x\n",
+ cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
+
+ switch (cqe->rx_cqe_sp.type) {
+ case CORE_RX_CQE_TYPE_SLOW_PATH:
+ rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn,
+ cqe, &flags);
+ break;
+ case CORE_RX_CQE_TYPE_GSI_OFFLOAD:
+ case CORE_RX_CQE_TYPE_REGULAR:
+ rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn,
+ cqe, &flags,
+ b_last_cqe);
+ break;
+ default:
+ rc = -EIO;
+ }
+ }
+
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
+{
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ p_rx = &p_ll2_conn->rx_queue;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ while (!list_empty(&p_rx->active_descq)) {
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (!p_pkt)
+ break;
+ list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) {
+ struct qed_ooo_buffer *p_buffer;
+
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer);
+ } else {
+ dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr;
+ void *cookie = p_pkt->cookie;
+ bool b_last;
+
+ b_last = list_empty(&p_rx->active_descq);
+ p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie,
+ p_ll2_conn->my_id,
+ cookie,
+ rx_buf_addr, b_last);
+ }
+ spin_lock_irqsave(&p_rx->lock, flags);
+ }
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+}
+
+static bool
+qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
+ struct core_rx_slow_path_cqe *p_cqe)
+{
+ struct ooo_opaque *ooo_opq;
+ u32 cid;
+
+ if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
+ return false;
+
+ ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ return false;
+
+ /* Need to make a flush */
+ cid = le32_to_cpu(ooo_opq->cid);
+ qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
+
+ return true;
+}
+
+static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ u16 packet_length = 0, parse_flags = 0, vlan = 0;
+ struct qed_ll2_rx_packet *p_pkt = NULL;
+ u32 num_ooo_add_to_peninsula = 0, cid;
+ union core_rx_cqe_union *cqe = NULL;
+ u16 cq_new_idx = 0, cq_old_idx = 0;
+ struct qed_ooo_buffer *p_buffer;
+ struct ooo_opaque *ooo_opq;
+ u8 placement_offset = 0;
+ u8 cqe_type;
+
+ cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ if (cq_new_idx == cq_old_idx)
+ return 0;
+
+ while (cq_new_idx != cq_old_idx) {
+ struct core_rx_fast_path_cqe *p_cqe_fp;
+
+ cqe = qed_chain_consume(&p_rx->rcq_chain);
+ cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain);
+ cqe_type = cqe->rx_cqe_sp.type;
+
+ if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH)
+ if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn,
+ &cqe->rx_cqe_sp))
+ continue;
+
+ if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
+ DP_NOTICE(p_hwfn,
+ "Got a non-regular LB LL2 completion [type 0x%02x]\n",
+ cqe_type);
+ return -EINVAL;
+ }
+ p_cqe_fp = &cqe->rx_cqe_fp;
+
+ placement_offset = p_cqe_fp->placement_offset;
+ parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
+ packet_length = le16_to_cpu(p_cqe_fp->packet_length);
+ vlan = le16_to_cpu(p_cqe_fp->vlan);
+ ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+ qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
+ cid = le32_to_cpu(ooo_opq->cid);
+
+ /* Process delete isle first */
+ if (ooo_opq->drop_size)
+ qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
+ ooo_opq->drop_isle,
+ ooo_opq->drop_size);
+
+ if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
+ continue;
+
+ /* Now process create/add/join isles */
+ if (unlikely(list_empty(&p_rx->active_descq))) {
+ DP_NOTICE(p_hwfn,
+ "LL2 OOO RX chain has no submitted buffers\n");
+ return -EIO;
+ }
+
+ p_pkt = list_first_entry(&p_rx->active_descq,
+ struct qed_ll2_rx_packet, list_entry);
+
+ if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
+ ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
+ if (unlikely(!p_pkt)) {
+ DP_NOTICE(p_hwfn,
+ "LL2 OOO RX packet is not valid\n");
+ return -EIO;
+ }
+ list_del(&p_pkt->list_entry);
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ p_buffer->packet_length = packet_length;
+ p_buffer->parse_flags = parse_flags;
+ p_buffer->vlan = vlan;
+ p_buffer->placement_offset = placement_offset;
+ qed_chain_consume(&p_rx->rxq_chain);
+ list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
+
+ switch (ooo_opq->ooo_opcode) {
+ case TCP_EVENT_ADD_NEW_ISLE:
+ qed_ooo_add_new_isle(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ ooo_opq->ooo_isle,
+ p_buffer);
+ break;
+ case TCP_EVENT_ADD_ISLE_RIGHT:
+ qed_ooo_add_new_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ ooo_opq->ooo_isle,
+ p_buffer,
+ QED_OOO_RIGHT_BUF);
+ break;
+ case TCP_EVENT_ADD_ISLE_LEFT:
+ qed_ooo_add_new_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ ooo_opq->ooo_isle,
+ p_buffer,
+ QED_OOO_LEFT_BUF);
+ break;
+ case TCP_EVENT_JOIN:
+ qed_ooo_add_new_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid,
+ ooo_opq->ooo_isle + 1,
+ p_buffer,
+ QED_OOO_LEFT_BUF);
+ qed_ooo_join_isles(p_hwfn,
+ p_hwfn->p_ooo_info,
+ cid, ooo_opq->ooo_isle);
+ break;
+ case TCP_EVENT_ADD_PEN:
+ num_ooo_add_to_peninsula++;
+ qed_ooo_put_ready_buffer(p_hwfn,
+ p_hwfn->p_ooo_info,
+ p_buffer, true);
+ break;
+ }
+ } else {
+ DP_NOTICE(p_hwfn,
+ "Unexpected event (%d) TX OOO completion\n",
+ ooo_opq->ooo_opcode);
+ }
+ }
+
+ return 0;
+}
+
+static void
+qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ll2_tx_pkt_info tx_pkt;
+ struct qed_ooo_buffer *p_buffer;
+ u16 l4_hdr_offset_w;
+ dma_addr_t first_frag;
+ u8 bd_flags;
+ int rc;
+
+ /* Submit Tx buffers here */
+ while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ l4_hdr_offset_w = 0;
+ bd_flags = 0;
+
+ first_frag = p_buffer->rx_buffer_phys_addr +
+ p_buffer->placement_offset;
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1);
+ SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1);
+
+ memset(&tx_pkt, 0, sizeof(tx_pkt));
+ tx_pkt.num_of_bds = 1;
+ tx_pkt.vlan = p_buffer->vlan;
+ tx_pkt.bd_flags = bd_flags;
+ tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w;
+ switch (p_ll2_conn->tx_dest) {
+ case CORE_TX_DEST_NW:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ break;
+ case CORE_TX_DEST_LB:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_LB;
+ break;
+ case CORE_TX_DEST_DROP:
+ default:
+ tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP;
+ break;
+ }
+ tx_pkt.first_frag = first_frag;
+ tx_pkt.first_frag_len = p_buffer->packet_length;
+ tx_pkt.cookie = p_buffer;
+
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id,
+ &tx_pkt, true);
+ if (rc) {
+ qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer, false);
+ break;
+ }
+ }
+}
+
+static void
+qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ooo_buffer *p_buffer;
+ int rc;
+
+ while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ rc = qed_ll2_post_rx_buffer(p_hwfn,
+ p_ll2_conn->my_id,
+ p_buffer->rx_buffer_phys_addr,
+ 0, p_buffer, true);
+ if (rc) {
+ qed_ooo_put_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info, p_buffer);
+ break;
+ }
+ }
+}
+
+static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ int rc;
+
+ if (!p_ll2_conn)
+ return 0;
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn);
+ if (rc)
+ return rc;
+
+ qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+ qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
+
+ return 0;
+}
+
+static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
+{
+ struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ struct qed_ooo_buffer *p_buffer;
+ bool b_dont_submit_rx = false;
+ u16 new_idx = 0, num_bds = 0;
+ int rc;
+
+ if (unlikely(!p_ll2_conn))
+ return 0;
+
+ if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
+ return 0;
+
+ new_idx = le16_to_cpu(*p_tx->p_fw_cons);
+ num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
+
+ if (unlikely(!num_bds))
+ return 0;
+
+ while (num_bds) {
+ if (list_empty(&p_tx->active_descq))
+ return -EINVAL;
+
+ p_pkt = list_first_entry(&p_tx->active_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (unlikely(!p_pkt))
+ return -EINVAL;
+
+ if (unlikely(p_pkt->bd_used != 1)) {
+ DP_NOTICE(p_hwfn,
+ "Unexpectedly many BDs(%d) in TX OOO completion\n",
+ p_pkt->bd_used);
+ return -EINVAL;
+ }
+
+ list_del(&p_pkt->list_entry);
+
+ num_bds--;
+ p_tx->bds_idx++;
+ qed_chain_consume(&p_tx->txq_chain);
+
+ p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+
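+ /* Once reposting an Rx buffer fails, stop posting and return the
+ * remaining buffers to the OOO free list.
+ */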
+ if (b_dont_submit_rx) {
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info,
+ p_buffer);
+ continue;
+ }
+
+ rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id,
+ p_buffer->rx_buffer_phys_addr, 0,
+ p_buffer, true);
+ if (rc != 0) {
+ qed_ooo_put_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info, p_buffer);
+ b_dont_submit_rx = true;
+ }
+ }
+
+ qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn);
+
+ return 0;
+}
+
+static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn)
+{
+ u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2),
+ "Stopping LL2 OOO queue [%02x]\n", *handle);
+
+ qed_ll2_terminate_connection(p_hwfn, *handle);
+ qed_ll2_release_connection(p_hwfn, *handle);
+ *handle = QED_LL2_UNUSED_HANDLE;
+}
+
+static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn,
+ u8 action_on_error)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
+ struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
+ struct core_rx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 cqe_pbl_size;
+ int rc = 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_start;
+ memset(p_ramrod, 0, sizeof(*p_ramrod));
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_rx->rx_sb_index;
+ p_ramrod->complete_event_flg = 1;
+
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
+ DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr);
+ cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
+ p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
+ DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
+ qed_chain_get_pbl_phys(&p_rx->rcq_chain));
+
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_stripping_en =
+ p_ll2_conn->input.rx_vlan_removal_en;
+
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE)
+ p_ramrod->report_outer_vlan = 1;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+ p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
+
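+ /* In LL2 non-unicast MF mode, the main function queue accepts all
+ * broadcast/multicast traffic, unless the connection is RoCE/iWARP
+ * or the function has the NVMeTCP personality.
+ */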
+ if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) &&
+ p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE &&
+ conn_type != QED_LL2_TYPE_IWARP &&
+ (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) {
+ p_ramrod->mf_si_bcast_accept_all = 1;
+ p_ramrod->mf_si_mcast_accept_all = 1;
+ } else {
+ p_ramrod->mf_si_bcast_accept_all = 0;
+ p_ramrod->mf_si_mcast_accept_all = 0;
+ }
+
+ p_ramrod->action_on_error.error_type = action_on_error;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+ p_ramrod->zero_prod_flg = 1;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct core_tx_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u16 pq_id = 0, pbl_size;
+ int rc = -EINVAL;
+
+ if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ return 0;
+
+ if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
+ p_ll2_conn->tx_stats_en = 0;
+ else
+ p_ll2_conn->tx_stats_en = 1;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_START,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_tx_queue_start;
+
+ p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
+ p_ramrod->sb_index = p_tx->tx_sb_index;
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu);
+ p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
+ p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
+
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr,
+ qed_chain_get_pbl_phys(&p_tx->txq_chain));
+ pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain);
+ p_ramrod->pbl_size = cpu_to_le16(pbl_size);
+
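+ /* Map the requested Tx traffic class to a physical queue: pure LB and
+ * OOO traffic use dedicated PQs, anything else uses the offload PQ.
+ */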
+ switch (p_ll2_conn->input.tx_tc) {
+ case PURE_LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ break;
+ case PKT_LB_TC:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO);
+ break;
+ default:
+ pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ break;
+ }
+
+ p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
+
+ switch (conn_type) {
+ case QED_LL2_TYPE_FCOE:
+ p_ramrod->conn_type = PROTOCOLID_FCOE;
+ break;
+ case QED_LL2_TYPE_TCP_ULP:
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
+ break;
+ case QED_LL2_TYPE_ROCE:
+ p_ramrod->conn_type = PROTOCOLID_ROCE;
+ break;
+ case QED_LL2_TYPE_IWARP:
+ p_ramrod->conn_type = PROTOCOLID_IWARP;
+ break;
+ case QED_LL2_TYPE_OOO:
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI ||
+ p_hwfn->hw_info.personality == QED_PCI_NVMETCP)
+ p_ramrod->conn_type = PROTOCOLID_TCP_ULP;
+ else
+ p_ramrod->conn_type = PROTOCOLID_IWARP;
+ break;
+ default:
+ p_ramrod->conn_type = PROTOCOLID_ETH;
+ DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
+ }
+
+ p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr,
+ &p_tx->db_msg, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+ return rc;
+}
+
+static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct core_rx_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_RX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.core_rx_queue_stop;
+
+ p_ramrod->complete_event_flg = 1;
+ p_ramrod->queue_id = p_ll2_conn->queue_id;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_ll2_conn->cid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ CORE_RAMROD_TX_QUEUE_STOP,
+ PROTOCOLID_CORE, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int
+qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info)
+{
+ struct qed_chain_init_params params = {
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = p_ll2_info->input.rx_num_desc,
+ };
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_ll2_rx_packet *p_descq;
+ u32 capacity;
+ int rc = 0;
+
+ if (!p_ll2_info->input.rx_num_desc)
+ goto out;
+
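+ /* The Rx path uses two chains: a next-pointer BD chain for posting
+ * buffers and a PBL-based chain for receive completions (RCQ).
+ */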
+ params.mode = QED_CHAIN_MODE_NEXT_PTR;
+ params.elem_size = sizeof(struct core_rx_bd);
+
+ rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, &params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
+ goto out;
+ }
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain);
+ p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet),
+ GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n");
+ goto out;
+ }
+ p_ll2_info->rx_queue.descq_array = p_descq;
+
+ params.mode = QED_CHAIN_MODE_PBL;
+ params.elem_size = sizeof(struct core_rx_fast_path_cqe);
+
+ rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, &params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc);
+
+out:
+ return rc;
+}
+
+static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = p_ll2_info->input.tx_num_desc,
+ .elem_size = sizeof(struct core_tx_bd),
+ };
+ struct qed_ll2_tx_packet *p_descq;
+ size_t desc_size;
+ u32 capacity;
+ int rc = 0;
+
+ if (!p_ll2_info->input.tx_num_desc)
+ goto out;
+
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain,
+ &params);
+ if (rc)
+ goto out;
+
+ capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
+ /* All bds_set elements are flexibly added. */
+ desc_size = struct_size(p_descq, bds_set,
+ p_ll2_info->input.tx_max_bds_per_packet);
+
+ p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
+ if (!p_descq) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ p_ll2_info->tx_queue.descq_mem = p_descq;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
+ p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc);
+
+out:
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Can't allocate memory for Tx LL2 with 0x%08x buffers\n",
+ p_ll2_info->input.tx_num_desc);
+ return rc;
+}
+
+static int
+qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_info, u16 mtu)
+{
+ struct qed_ooo_buffer *p_buf = NULL;
+ void *p_virt;
+ u16 buf_idx;
+ int rc = 0;
+
+ if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO)
+ return rc;
+
+ /* Correct number of requested OOO buffers if needed */
+ if (!p_ll2_info->input.rx_num_ooo_buffers) {
+ u16 num_desc = p_ll2_info->input.rx_num_desc;
+
+ if (!num_desc)
+ return -EINVAL;
+ p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2;
+ }
+
+ for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers;
+ buf_idx++) {
+ p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL);
+ if (!p_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
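+ /* Size each OOO buffer to hold an MTU-sized frame plus header
+ * overhead, rounded up to a multiple of the cache-line size.
+ */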
+ p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE;
+ p_buf->rx_buffer_size = (p_buf->rx_buffer_size +
+ ETH_CACHE_LINE_SIZE - 1) &
+ ~(ETH_CACHE_LINE_SIZE - 1);
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buf->rx_buffer_size,
+ &p_buf->rx_buffer_phys_addr,
+ GFP_KERNEL);
+ if (!p_virt) {
+ kfree(p_buf);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ p_buf->rx_buffer_virt_addr = p_virt;
+ qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_LL2,
+ "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n",
+ p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size);
+
+out:
+ return rc;
+}
+
+static int
+qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
+{
+ if (!cbs || (!cbs->rx_comp_cb ||
+ !cbs->rx_release_cb ||
+ !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie))
+ return -EINVAL;
+
+ p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb;
+ p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb;
+ p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb;
+ p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb;
+ p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb;
+ p_ll2_info->cbs.cookie = cbs->cookie;
+
+ return 0;
+}
+
+static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_acquire_data *data,
+ u8 *start_idx, u8 *last_idx)
+{
+ /* LL2 queues handles will be split as follows:
+ * First will be the legacy queues, and then the ctx based.
+ */
+ if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ *start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
+ } else {
+ /* QED_LL2_RX_TYPE_CTX */
+ *start_idx = QED_LL2_CTX_CONN_BASE_PF;
+ *last_idx = *start_idx +
+ QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
+ }
+}
+
+static enum core_error_handle
+qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
+{
+ switch (err) {
+ case QED_LL2_DROP_PACKET:
+ return LL2_DROP_PACKET;
+ case QED_LL2_DO_NOTHING:
+ return LL2_DO_NOTHING;
+ case QED_LL2_ASSERT:
+ return LL2_ASSERT;
+ default:
+ return LL2_DO_NOTHING;
+ }
+}
+
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
+ struct qed_ll2_info *p_ll2_info = NULL;
+ u8 i, first_idx, last_idx, *p_tx_max;
+ int rc;
+
+ if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
+
+ /* Find a free connection to be used */
+ for (i = first_idx; i < last_idx; i++) {
+ mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
+ if (p_hwfn->p_ll2_info[i].b_active) {
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ continue;
+ }
+
+ p_hwfn->p_ll2_info[i].b_active = true;
+ p_ll2_info = &p_hwfn->p_ll2_info[i];
+ mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
+ break;
+ }
+ if (!p_ll2_info)
+ return -EBUSY;
+
+ memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
+
+ switch (data->input.tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (data->input.conn_type == QED_LL2_TYPE_OOO ||
+ data->input.secondary_queue)
+ p_ll2_info->main_func_queue = false;
+ else
+ p_ll2_info->main_func_queue = true;
+
+ /* Correct maximum number of Tx BDs */
+ p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet;
+ if (*p_tx_max == 0)
+ *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET;
+ else
+ *p_tx_max = min_t(u8, *p_tx_max,
+ CORE_LL2_TX_MAX_BDS_PER_PACKET);
+
+ rc = qed_ll2_set_cbs(p_ll2_info, data->cbs);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Invalid callback functions\n");
+ goto q_allocate_fail;
+ }
+
+ rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info);
+ if (rc)
+ goto q_allocate_fail;
+
+ rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info);
+ if (rc)
+ goto q_allocate_fail;
+
+ rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info,
+ data->input.mtu);
+ if (rc)
+ goto q_allocate_fail;
+
+ /* Register callbacks for the Rx/Tx queues */
+ if (data->input.conn_type == QED_LL2_TYPE_OOO) {
+ comp_rx_cb = qed_ll2_lb_rxq_completion;
+ comp_tx_cb = qed_ll2_lb_txq_completion;
+ } else {
+ comp_rx_cb = qed_ll2_rxq_completion;
+ comp_tx_cb = qed_ll2_txq_completion;
+ }
+
+ if (data->input.rx_num_desc) {
+ qed_int_register_cb(p_hwfn, comp_rx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->rx_queue.rx_sb_index,
+ &p_ll2_info->rx_queue.p_fw_cons);
+ p_ll2_info->rx_queue.b_cb_registered = true;
+ }
+
+ if (data->input.tx_num_desc) {
+ qed_int_register_cb(p_hwfn,
+ comp_tx_cb,
+ &p_hwfn->p_ll2_info[i],
+ &p_ll2_info->tx_queue.tx_sb_index,
+ &p_ll2_info->tx_queue.p_fw_cons);
+ p_ll2_info->tx_queue.b_cb_registered = true;
+ }
+
+ *data->p_connection_handle = i;
+ return rc;
+
+q_allocate_fail:
+ qed_ll2_release_connection(p_hwfn, i);
+ return -ENOMEM;
+}
+
+static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ enum qed_ll2_error_handle error_input;
+ enum core_error_handle error_mode;
+ u8 action_on_error = 0;
+ int rc;
+
+ if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
+ return 0;
+
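+ /* Zero the Rx producer and translate the configured error handling
+ * policies into the ramrod's action_on_error field.
+ */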
+ DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0);
+ error_input = p_ll2_conn->input.ai_err_packet_too_big;
+ error_mode = qed_ll2_get_error_choice(error_input);
+ SET_FIELD(action_on_error,
+ CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode);
+ error_input = p_ll2_conn->input.ai_err_no_buf;
+ error_mode = qed_ll2_get_error_choice(error_input);
+ SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
+
+ rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
+ if (rc)
+ return rc;
+
+ if (p_ll2_conn->rx_queue.ctx_based) {
+ rc = qed_db_recovery_add(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data,
+ DB_REC_WIDTH_64B, DB_REC_KERNEL);
+ }
+
+ return rc;
+}
+
+static void
+qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
+}
+
+static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
+ u8 handle,
+ u8 ll2_queue_type)
+{
+ u8 qid;
+
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
+
+ /* QED_LL2_RX_TYPE_CTX
+ * FW distinguishes between the legacy queues (ram based) and the
+ * ctx based queues by the queue_id.
+ * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
+ * and the queue ids above that are ctx based.
+ */
+ qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
+ MAX_NUM_LL2_RX_RAM_QUEUES;
+
+ /* See comment on the acquire connection for how the ll2
+ * queues handles are divided.
+ */
+ qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
+
+ return qid;
+}
+
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
+{
+ struct core_conn_context *p_cxt;
+ struct qed_ll2_tx_packet *p_pkt;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_rx_queue *p_rx;
+ struct qed_ll2_tx_queue *p_tx;
+ struct qed_cxt_info cxt_info;
+ struct qed_ptt *p_ptt;
+ int rc = -EINVAL;
+ u32 i, capacity;
+ size_t desc_size;
+ u8 qid, stats_id;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ p_rx = &p_ll2_conn->rx_queue;
+ p_tx = &p_ll2_conn->tx_queue;
+
+ qed_chain_reset(&p_rx->rxq_chain);
+ qed_chain_reset(&p_rx->rcq_chain);
+ INIT_LIST_HEAD(&p_rx->active_descq);
+ INIT_LIST_HEAD(&p_rx->free_descq);
+ INIT_LIST_HEAD(&p_rx->posting_descq);
+ spin_lock_init(&p_rx->lock);
+ capacity = qed_chain_get_capacity(&p_rx->rxq_chain);
+ for (i = 0; i < capacity; i++)
+ list_add_tail(&p_rx->descq_array[i].list_entry,
+ &p_rx->free_descq);
+ *p_rx->p_fw_cons = 0;
+
+ qed_chain_reset(&p_tx->txq_chain);
+ INIT_LIST_HEAD(&p_tx->active_descq);
+ INIT_LIST_HEAD(&p_tx->free_descq);
+ INIT_LIST_HEAD(&p_tx->sending_descq);
+ spin_lock_init(&p_tx->lock);
+ capacity = qed_chain_get_capacity(&p_tx->txq_chain);
+ /* All bds_set elements are flexibly added. */
+ desc_size = struct_size(p_pkt, bds_set,
+ p_ll2_conn->input.tx_max_bds_per_packet);
+
+ for (i = 0; i < capacity; i++) {
+ p_pkt = p_tx->descq_mem + desc_size * i;
+ list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
+ }
+ p_tx->cur_completing_bd_idx = 0;
+ p_tx->bds_idx = 0;
+ p_tx->b_completing_packet = false;
+ p_tx->cur_send_packet = NULL;
+ p_tx->cur_send_frag_num = 0;
+ p_tx->cur_completing_frag_num = 0;
+ *p_tx->p_fw_cons = 0;
+
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
+ if (rc)
+ goto out;
+ cxt_info.iid = p_ll2_conn->cid;
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_ll2_conn->cid);
+ goto out;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ memset(p_cxt, 0, sizeof(*p_cxt));
+
+ qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
+ p_ll2_conn->input.rx_conn_type);
+ stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
+ p_ll2_conn->input.rx_conn_type,
+ qid);
+ p_ll2_conn->queue_id = qid;
+ p_ll2_conn->tx_stats_id = stats_id;
+
+ /* If there is no valid stats id for this connection, disable stats */
+ if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
+ p_ll2_conn->tx_stats_en = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Disabling stats for queue %d - not enough counters\n",
+ qid);
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
+ p_hwfn->rel_pf_id,
+ p_ll2_conn->input.rx_conn_type, qid, stats_id);
+
+ if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
+ p_rx->set_prod_addr =
+ (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_LL2_RX_PRODS, qid);
+ } else {
+ /* QED_LL2_RX_TYPE_CTX - using doorbell */
+ p_rx->ctx_based = 1;
+
+ p_rx->set_prod_addr = p_hwfn->doorbells +
+ p_hwfn->dpi_start_offset +
+ DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
+
+ /* prepare db data */
+ p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_rx->db_data.params,
+ CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
+ }
+
+ p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(p_ll2_conn->cid,
+ DQ_DEMS_LEGACY);
+ /* prepare db data */
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_TX_BD_PROD_CMD);
+ p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
+ if (rc)
+ goto out;
+
+ rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn);
+ if (rc)
+ goto out;
+
+ if (!QED_IS_RDMA_PERSONALITY(p_hwfn) &&
+ !QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1);
+
+ qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn);
+
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FCOE, 0);
+ qed_llh_add_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FIP, 0);
+ }
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_rx_queue *p_rx,
+ struct qed_ll2_rx_packet *p_curp)
+{
+ struct qed_ll2_rx_packet *p_posting_packet = NULL;
+ struct core_ll2_rx_prod rx_prod = { 0, 0 };
+ bool b_notify_fw = false;
+ u16 bd_prod, cq_prod;
+
+ /* This handles the flushing of already posted buffers */
+ while (!list_empty(&p_rx->posting_descq)) {
+ p_posting_packet = list_first_entry(&p_rx->posting_descq,
+ struct qed_ll2_rx_packet,
+ list_entry);
+ list_move_tail(&p_posting_packet->list_entry,
+ &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ /* This handles the supplied packet [if there is one] */
+ if (p_curp) {
+ list_add_tail(&p_curp->list_entry, &p_rx->active_descq);
+ b_notify_fw = true;
+ }
+
+ if (!b_notify_fw)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
+ cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
+ if (p_rx->ctx_based) {
+ /* update producer by giving a doorbell */
+ p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
+ p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
+ DIRECT_REG_WR64(p_rx->set_prod_addr,
+ *((u64 *)&p_rx->db_data));
+ } else {
+ rx_prod.bd_prod = cpu_to_le16(bd_prod);
+ rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+
+ /* Make sure chain element is updated before ringing the
+ * doorbell
+ */
+ dma_wmb();
+
+ DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
+ }
+}
+
+int qed_ll2_post_rx_buffer(void *cxt,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct core_rx_bd_with_buff_len *p_curb = NULL;
+ struct qed_ll2_rx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn;
+ struct qed_ll2_rx_queue *p_rx;
+ unsigned long flags;
+ void *p_data;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return -EINVAL;
+ p_rx = &p_ll2_conn->rx_queue;
+ if (!p_rx->set_prod_addr)
+ return -EIO;
+
+ spin_lock_irqsave(&p_rx->lock, flags);
+ if (!list_empty(&p_rx->free_descq))
+ p_curp = list_first_entry(&p_rx->free_descq,
+ struct qed_ll2_rx_packet, list_entry);
+ if (p_curp) {
+ if (qed_chain_get_elem_left(&p_rx->rxq_chain) &&
+ qed_chain_get_elem_left(&p_rx->rcq_chain)) {
+ p_data = qed_chain_produce(&p_rx->rxq_chain);
+ p_curb = (struct core_rx_bd_with_buff_len *)p_data;
+ qed_chain_produce(&p_rx->rcq_chain);
+ }
+ }
+
+ /* If we're lacking entries, let's try to flush buffers to FW */
+ if (!p_curp || !p_curb) {
+ rc = -EBUSY;
+ p_curp = NULL;
+ goto out_notify;
+ }
+
+ /* We have an Rx packet we can fill */
+ DMA_REGPAIR_LE(p_curb->addr, addr);
+ p_curb->buff_length = cpu_to_le16(buf_len);
+ p_curp->rx_buf_addr = addr;
+ p_curp->cookie = cookie;
+ p_curp->rxq_bd = p_curb;
+ p_curp->buf_length = buf_len;
+ list_del(&p_curp->list_entry);
+
+ /* Check if we only want to enqueue this packet without informing FW */
+ if (!notify_fw) {
+ list_add_tail(&p_curp->list_entry, &p_rx->posting_descq);
+ goto out;
+ }
+
+out_notify:
+ qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp);
+out:
+ spin_unlock_irqrestore(&p_rx->lock, flags);
+ return rc;
+}
+
+static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_tx_queue *p_tx,
+ struct qed_ll2_tx_packet *p_curp,
+ struct qed_ll2_tx_pkt_info *pkt,
+ u8 notify_fw)
+{
+ list_del(&p_curp->list_entry);
+ p_curp->cookie = pkt->cookie;
+ p_curp->bd_used = pkt->num_of_bds;
+ p_curp->notify_fw = notify_fw;
+ p_tx->cur_send_packet = p_curp;
+ p_tx->cur_send_frag_num = 0;
+
+ p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag;
+ p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len;
+ p_tx->cur_send_frag_num++;
+}
+
+static void
+qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2,
+ struct qed_ll2_tx_packet *p_curp,
+ struct qed_ll2_tx_pkt_info *pkt)
+{
+ struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain;
+ u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain);
+ struct core_tx_bd *start_bd = NULL;
+ enum core_roce_flavor_type roce_flavor;
+ enum core_tx_dest tx_dest;
+ u16 bd_data = 0, frag_idx;
+ u16 bitfield1;
+
+ roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE
+ : CORE_RROCE;
+
+ switch (pkt->tx_dest) {
+ case QED_LL2_TX_DEST_NW:
+ tx_dest = CORE_TX_DEST_NW;
+ break;
+ case QED_LL2_TX_DEST_LB:
+ tx_dest = CORE_TX_DEST_LB;
+ break;
+ case QED_LL2_TX_DEST_DROP:
+ tx_dest = CORE_TX_DEST_DROP;
+ break;
+ default:
+ tx_dest = CORE_TX_DEST_LB;
+ break;
+ }
+
+ start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
+ start_bd->nw_vlan_or_lb_echo =
+ cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
+ } else {
+ start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan);
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_FCOE)
+ pkt->remove_stag = true;
+ }
+
+ bitfield1 = le16_to_cpu(start_bd->bitfield1);
+ SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w);
+ SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest);
+ start_bd->bitfield1 = cpu_to_le16(bitfield1);
+
+ bd_data |= pkt->bd_flags;
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor);
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len));
+ SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION,
+ !!(pkt->remove_stag));
+
+ start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data);
+ DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag);
+ start_bd->nbytes = cpu_to_le16(pkt->first_frag_len);
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
+ p_ll2->queue_id,
+ p_ll2->cid,
+ p_ll2->input.conn_type,
+ prod_idx,
+ pkt->first_frag_len,
+ pkt->num_of_bds,
+ le32_to_cpu(start_bd->addr.hi),
+ le32_to_cpu(start_bd->addr.lo));
+
+ if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds)
+ return;
+
+ /* Need to provide the packet with additional BDs for frags */
+ for (frag_idx = p_ll2->tx_queue.cur_send_frag_num;
+ frag_idx < pkt->num_of_bds; frag_idx++) {
+ struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd;
+
+ *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
+ (*p_bd)->bd_data.as_bitfield = 0;
+ (*p_bd)->bitfield1 = 0;
+ p_curp->bds_set[frag_idx].tx_frag = 0;
+ p_curp->bds_set[frag_idx].frag_len = 0;
+ }
+}
+
+/* This should be called while the Txq spinlock is being held */
+static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw;
+ struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
+ struct qed_ll2_tx_packet *p_pkt = NULL;
+ u16 bd_prod;
+
+ /* If there are missing BDs, don't do anything now */
+ if (p_ll2_conn->tx_queue.cur_send_frag_num !=
+ p_ll2_conn->tx_queue.cur_send_packet->bd_used)
+ return;
+
+ /* Push the current packet to the list and clean after it */
+ list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry,
+ &p_ll2_conn->tx_queue.sending_descq);
+ p_ll2_conn->tx_queue.cur_send_packet = NULL;
+ p_ll2_conn->tx_queue.cur_send_frag_num = 0;
+
+ /* Notify FW of packet only if requested to */
+ if (!b_notify)
+ return;
+
+ bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain);
+
+ while (!list_empty(&p_tx->sending_descq)) {
+ p_pkt = list_first_entry(&p_tx->sending_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (!p_pkt)
+ break;
+
+ list_move_tail(&p_pkt->list_entry, &p_tx->active_descq);
+ }
+
+ p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod);
+
+ /* Make sure the BDs data is updated before ringing the doorbell */
+ wmb();
+
+ DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
+ "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
+ p_ll2_conn->queue_id,
+ p_ll2_conn->cid,
+ p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod);
+}
+
+int qed_ll2_prepare_tx_packet(void *cxt,
+ u8 connection_handle,
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_tx_packet *p_curp = NULL;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ll2_tx_queue *p_tx;
+ struct qed_chain *p_tx_chain;
+ unsigned long flags;
+ int rc = 0;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (unlikely(!p_ll2_conn))
+ return -EINVAL;
+ p_tx = &p_ll2_conn->tx_queue;
+ p_tx_chain = &p_tx->txq_chain;
+
+ if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
+ return -EIO;
+
+ spin_lock_irqsave(&p_tx->lock, flags);
+ if (unlikely(p_tx->cur_send_packet)) {
+ rc = -EEXIST;
+ goto out;
+ }
+
+ /* Get entry, but only if we have tx elements for it */
+ if (unlikely(!list_empty(&p_tx->free_descq)))
+ p_curp = list_first_entry(&p_tx->free_descq,
+ struct qed_ll2_tx_packet, list_entry);
+ if (unlikely(p_curp &&
+ qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
+ p_curp = NULL;
+
+ if (unlikely(!p_curp)) {
+ rc = -EBUSY;
+ goto out;
+ }
+
+ /* Prepare packet and BD, and perhaps send a doorbell to FW */
+ qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw);
+
+ qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt);
+
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+
+out:
+ spin_unlock_irqrestore(&p_tx->lock, flags);
+ return rc;
+}
+
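+/* Add one fragment to the Tx packet most recently prepared by
+ * qed_ll2_prepare_tx_packet(); once all of the packet's BDs have been filled,
+ * qed_ll2_tx_packet_notify() rings the doorbell (see qed_ll2_start_xmit()
+ * below for a typical multi-fragment flow).
+ */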
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes)
+{
+ struct qed_ll2_tx_packet *p_cur_send_packet = NULL;
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ u16 cur_send_frag_num = 0;
+ struct core_tx_bd *p_bd;
+ unsigned long flags;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (unlikely(!p_ll2_conn))
+ return -EINVAL;
+
+ if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
+ return -EINVAL;
+
+ p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
+ cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
+
+ if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
+ return -EINVAL;
+
+ /* Fill the BD information, and possibly notify FW */
+ p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd;
+ DMA_REGPAIR_LE(p_bd->addr, addr);
+ p_bd->nbytes = cpu_to_le16(nbytes);
+ p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr;
+ p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes;
+
+ p_ll2_conn->tx_queue.cur_send_frag_num++;
+
+ spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags);
+ qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn);
+ spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags);
+
+ return 0;
+}
+
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ int rc = -EINVAL;
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle);
+ if (!p_ll2_conn) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Stop Tx & Rx of connection, if needed */
+ if (QED_LL2_TX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->tx_queue.b_cb_registered = false;
+ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+ rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ goto out;
+
+ qed_ll2_txq_flush(p_hwfn, connection_handle);
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index);
+ }
+
+ if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
+ p_ll2_conn->rx_queue.b_cb_registered = false;
+ smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
+
+ if (p_ll2_conn->rx_queue.ctx_based)
+ qed_db_recovery_del(p_hwfn->cdev,
+ p_ll2_conn->rx_queue.set_prod_addr,
+ &p_ll2_conn->rx_queue.db_data);
+
+ rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
+ if (rc)
+ goto out;
+
+ qed_ll2_rxq_flush(p_hwfn, connection_handle);
+ qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index);
+ }
+
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+
+ if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) {
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FCOE, 0);
+ qed_llh_remove_protocol_filter(p_hwfn->cdev, 0,
+ QED_LLH_FILTER_ETHERTYPE,
+ ETH_P_FIP, 0);
+ }
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_info *p_ll2_conn)
+{
+ struct qed_ooo_buffer *p_buffer;
+
+ if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn,
+ p_hwfn->p_ooo_info))) {
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buffer->rx_buffer_size,
+ p_buffer->rx_buffer_virt_addr,
+ p_buffer->rx_buffer_phys_addr);
+ kfree(p_buffer);
+ }
+}
+
+void qed_ll2_release_connection(void *cxt, u8 connection_handle)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+
+ p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
+ if (!p_ll2_conn)
+ return;
+
+ kfree(p_ll2_conn->tx_queue.descq_mem);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain);
+
+ kfree(p_ll2_conn->rx_queue.descq_array);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain);
+ qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain);
+
+ qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid);
+
+ qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn);
+
+ mutex_lock(&p_ll2_conn->mutex);
+ p_ll2_conn->b_active = false;
+ mutex_unlock(&p_ll2_conn->mutex);
+}
+
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ll2_info *p_ll2_connections;
+ u8 i;
+
+ /* Allocate LL2's set struct */
+ p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS,
+ sizeof(struct qed_ll2_info), GFP_KERNEL);
+ if (!p_ll2_connections) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_ll2'\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ p_ll2_connections[i].my_id = i;
+
+ p_hwfn->p_ll2_info = p_ll2_connections;
+ return 0;
+}
+
+void qed_ll2_setup(struct qed_hwfn *p_hwfn)
+{
+ int i;
+
+ for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++)
+ mutex_init(&p_hwfn->p_ll2_info[i].mutex);
+}
+
+void qed_ll2_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_ll2_info)
+ return;
+
+ kfree(p_hwfn->p_ll2_info);
+ p_hwfn->p_ll2_info = NULL;
+}
+
+static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_port_stats port_stats;
+
+ memset(&port_stats, 0, sizeof(port_stats));
+ qed_memcpy_from(p_hwfn, p_ptt, &port_stats,
+ BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)),
+ sizeof(port_stats));
+
+ p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr);
+ p_stats->gsi_invalid_pkt_length +=
+ HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length);
+ p_stats->gsi_unsupported_pkt_typ +=
+ HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ);
+ p_stats->gsi_crcchksm_error +=
+ HILO_64_REGPAIR(port_stats.gsi_crcchksm_error);
+}
+
+static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_tstorm_per_queue_stat tstats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 tstats_addr;
+
+ memset(&tstats, 0, sizeof(tstats));
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats));
+
+ p_stats->packet_too_big_discard +=
+ HILO_64_REGPAIR(tstats.packet_too_big_discard);
+ p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard);
+}
+
+static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_ustorm_per_queue_stat ustats;
+ u8 qid = p_ll2_conn->queue_id;
+ u32 ustats_addr;
+
+ memset(&ustats, 0, sizeof(ustats));
+ ustats_addr = BAR0_MAP_REG_USDM_RAM +
+ CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats));
+
+ p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+}
+
+static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_ll2_info *p_ll2_conn,
+ struct qed_ll2_stats *p_stats)
+{
+ struct core_ll2_pstorm_per_queue_stat pstats;
+ u8 stats_id = p_ll2_conn->tx_stats_id;
+ u32 pstats_addr;
+
+ memset(&pstats, 0, sizeof(pstats));
+ pstats_addr = BAR0_MAP_REG_PSDM_RAM +
+ CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats));
+
+ p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->sent_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+}
+
+static int __qed_ll2_get_stats(void *cxt, u8 connection_handle,
+ struct qed_ll2_stats *p_stats)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+ struct qed_ll2_info *p_ll2_conn = NULL;
+ struct qed_ptt *p_ptt;
+
+ if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) ||
+ !p_hwfn->p_ll2_info)
+ return -EINVAL;
+
+ p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle];
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "Failed to acquire ptt\n");
+ return -EINVAL;
+ }
+
+ if (p_ll2_conn->input.gsi_enable)
+ _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats);
+
+ _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ if (p_ll2_conn->tx_stats_en)
+ _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_ll2_get_stats(void *cxt,
+ u8 connection_handle, struct qed_ll2_stats *p_stats)
+{
+ memset(p_stats, 0, sizeof(*p_stats));
+ return __qed_ll2_get_stats(cxt, connection_handle, p_stats);
+}
+
+static void qed_ll2b_release_rx_packet(void *cxt,
+ u8 connection_handle,
+ void *cookie,
+ dma_addr_t rx_buf_addr,
+ bool b_last_packet)
+{
+ struct qed_hwfn *p_hwfn = cxt;
+
+ qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie);
+}
+
+static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie)
+{
+ cdev->ll2->cbs = ops;
+ cdev->ll2->cb_cookie = cookie;
+}
+
+static struct qed_ll2_cbs ll2_cbs = {
+ .rx_comp_cb = &qed_ll2b_complete_rx_packet,
+ .rx_release_cb = &qed_ll2b_release_rx_packet,
+ .tx_comp_cb = &qed_ll2b_complete_tx_packet,
+ .tx_release_cb = &qed_ll2b_complete_tx_packet,
+};
+
+static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_acquire_data *data,
+ struct qed_ll2_params *params,
+ enum qed_ll2_conn_type conn_type,
+ u8 *handle, bool lb)
+{
+ memset(data, 0, sizeof(*data));
+
+ data->input.conn_type = conn_type;
+ data->input.mtu = params->mtu;
+ data->input.rx_num_desc = QED_LL2_RX_SIZE;
+ data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ data->input.rx_vlan_removal_en = params->rx_vlan_stripping;
+ data->input.tx_num_desc = QED_LL2_TX_SIZE;
+ data->p_connection_handle = handle;
+ data->cbs = &ll2_cbs;
+ ll2_cbs.cookie = p_hwfn;
+
+ if (lb) {
+ data->input.tx_tc = PKT_LB_TC;
+ data->input.tx_dest = QED_LL2_TX_DEST_LB;
+ } else {
+ data->input.tx_tc = 0;
+ data->input.tx_dest = QED_LL2_TX_DEST_NW;
+ }
+}
+
+static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_params *params)
+{
+ u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
+ struct qed_ll2_acquire_data data;
+ int rc;
+
+ qed_ll2_set_conn_data(p_hwfn, &data, params,
+ QED_LL2_TYPE_OOO, handle, true);
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc) {
+ DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n");
+ goto out;
+ }
+
+ rc = qed_ll2_establish_connection(p_hwfn, *handle);
+ if (rc) {
+ DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ qed_ll2_release_connection(p_hwfn, *handle);
+out:
+ *handle = QED_LL2_UNUSED_HANDLE;
+ return rc;
+}
+
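+/* True for a storage-personality PF whose affinity hwfn is not the leading
+ * one, i.e. a PF affinitized to engine 1 of a CMT device.
+ */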
+static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev)
+{
+ return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) ||
+ QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) &&
+ (QED_AFFIN_HWFN(cdev) != QED_LEADING_HWFN(cdev));
+}
+
+static int __qed_ll2_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
+
+ rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
+ if (rc)
+ DP_INFO(cdev, "Failed to terminate LL2 connection\n");
+
+ qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
+
+ return rc;
+}
+
+static int qed_ll2_stop(struct qed_dev *cdev)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ int rc = 0, rc2 = 0;
+
+ if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE)
+ return 0;
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address);
+
+ eth_zero_addr(cdev->ll2_mac_address);
+
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_ll2_stop_ooo(p_hwfn);
+
+ /* In CMT mode, LL2 is always started on engine 0 for a storage PF */
+ if (b_is_storage_eng1) {
+ rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev));
+ if (rc2)
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to stop LL2 on engine 0\n");
+ }
+
+ rc = __qed_ll2_stop(p_hwfn);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to stop LL2\n");
+
+ qed_ll2_kill_buffers(cdev);
+
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+
+ return rc | rc2;
+}
+
+static int __qed_ll2_start(struct qed_hwfn *p_hwfn,
+ struct qed_ll2_params *params)
+{
+ struct qed_ll2_buffer *buffer, *tmp_buffer;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ enum qed_ll2_conn_type conn_type;
+ struct qed_ll2_acquire_data data;
+ int rc, rx_cnt;
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_FCOE:
+ conn_type = QED_LL2_TYPE_FCOE;
+ break;
+ case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
+ conn_type = QED_LL2_TYPE_TCP_ULP;
+ break;
+ case QED_PCI_ETH_ROCE:
+ conn_type = QED_LL2_TYPE_ROCE;
+ break;
+ default:
+ conn_type = QED_LL2_TYPE_TEST;
+ }
+
+ qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type,
+ &cdev->ll2->handle, false);
+
+ rc = qed_ll2_acquire_connection(p_hwfn, &data);
+ if (rc) {
+ DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n");
+ return rc;
+ }
+
+ rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle);
+ if (rc) {
+ DP_INFO(p_hwfn, "Failed to establish LL2 connection\n");
+ goto release_conn;
+ }
+
+ /* Post all Rx buffers to FW */
+ spin_lock_bh(&cdev->ll2->lock);
+ rx_cnt = cdev->ll2->rx_cnt;
+ list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
+ rc = qed_ll2_post_rx_buffer(p_hwfn,
+ cdev->ll2->handle,
+ buffer->phys_addr, 0, buffer, 1);
+ if (rc) {
+ DP_INFO(p_hwfn,
+ "Failed to post an Rx buffer; Deleting it\n");
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
+ kfree(buffer->data);
+ list_del(&buffer->list);
+ kfree(buffer);
+ } else {
+ rx_cnt++;
+ }
+ }
+ spin_unlock_bh(&cdev->ll2->lock);
+
+ if (rx_cnt == cdev->ll2->rx_cnt) {
+ DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n");
+ goto terminate_conn;
+ }
+ cdev->ll2->rx_cnt = rx_cnt;
+
+ return 0;
+
+terminate_conn:
+ qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle);
+release_conn:
+ qed_ll2_release_connection(p_hwfn, cdev->ll2->handle);
+ return rc;
+}
+
+static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ struct qed_ll2_buffer *buffer;
+ int rx_num_desc, i, rc;
+
+ if (!is_valid_ether_addr(params->ll2_mac_address)) {
+ DP_NOTICE(cdev, "Invalid Ethernet address\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(!cdev->ll2->cbs);
+
+ /* Initialize LL2 locks & lists */
+ INIT_LIST_HEAD(&cdev->ll2->list);
+ spin_lock_init(&cdev->ll2->lock);
+
+ cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
+ L1_CACHE_BYTES + params->mtu;
+
+ /* Allocate memory for LL2.
+ * In CMT mode, a storage PF that is affinitized to engine 1 also starts
+ * LL2 on engine 0, and thus twice the buffers are needed.
+ */
+ rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1);
+ DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n",
+ rx_num_desc, cdev->ll2->rx_size);
+ for (i = 0; i < rx_num_desc; i++) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ DP_INFO(cdev, "Failed to allocate LL2 buffers\n");
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data,
+ &buffer->phys_addr);
+ if (rc) {
+ kfree(buffer);
+ goto err0;
+ }
+
+ list_add_tail(&buffer->list, &cdev->ll2->list);
+ }
+
+ rc = __qed_ll2_start(p_hwfn, params);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start LL2\n");
+ goto err0;
+ }
+
+ /* In CMT mode, LL2 must always be started on engine 0 for a storage PF,
+ * since broadcast/multicast packets are routed to engine 0.
+ */
+ if (b_is_storage_eng1) {
+ rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params);
+ if (rc) {
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to start LL2 on engine 0\n");
+ goto err1;
+ }
+ }
+
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
+ DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
+ rc = qed_ll2_start_ooo(p_hwfn, params);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start OOO LL2\n");
+ goto err2;
+ }
+ }
+
+ if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) {
+ rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to add an LLH filter\n");
+ goto err3;
+ }
+ }
+
+ ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
+
+ return 0;
+
+err3:
+ if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn))
+ qed_ll2_stop_ooo(p_hwfn);
+err2:
+ if (b_is_storage_eng1)
+ __qed_ll2_stop(QED_LEADING_HWFN(cdev));
+err1:
+ __qed_ll2_stop(p_hwfn);
+err0:
+ qed_ll2_kill_buffers(cdev);
+ cdev->ll2->handle = QED_LL2_UNUSED_HANDLE;
+ return rc;
+}
+
+static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
+ unsigned long xmit_flags)
+{
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ struct qed_ll2_tx_pkt_info pkt;
+ const skb_frag_t *frag;
+ u8 flags = 0, nr_frags;
+ int rc = -EINVAL, i;
+ dma_addr_t mapping;
+ u16 vlan = 0;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
+ DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
+ return -EINVAL;
+ }
+
+ /* Cache number of fragments from SKB since SKB may be freed by
+ * the completion routine after calling qed_ll2_prepare_tx_packet()
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
+ DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
+ 1 + nr_frags);
+ return -EINVAL;
+ }
+
+ mapping = dma_map_single(&cdev->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev, "SKB mapping failed\n");
+ return -EINVAL;
+ }
+
+ /* Request HW to calculate IP csum */
+ if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) &&
+ ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT);
+
+ if (skb_vlan_tag_present(skb)) {
+ vlan = skb_vlan_tag_get(skb);
+ flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT);
+ }
+
+ memset(&pkt, 0, sizeof(pkt));
+ pkt.num_of_bds = 1 + nr_frags;
+ pkt.vlan = vlan;
+ pkt.bd_flags = flags;
+ pkt.tx_dest = QED_LL2_TX_DEST_NW;
+ pkt.first_frag = mapping;
+ pkt.first_frag_len = skb->len;
+ pkt.cookie = skb;
+ if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) &&
+ test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
+ pkt.remove_stag = true;
+
+ /* qed_ll2_prepare_tx_packet() may actually send the packet if
+ * there are no fragments in the skb and subsequently the completion
+ * routine may run and free the SKB, so do not dereference the SKB
+ * beyond this point unless it has fragments.
+ */
+ rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
+ &pkt, 1);
+ if (unlikely(rc))
+ goto err;
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+
+ mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) {
+ DP_NOTICE(cdev,
+ "Unable to map frag - dropping packet\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn,
+ cdev->ll2->handle,
+ mapping,
+ skb_frag_size(frag));
+
+ /* If this failed, there is not much to do: a partial packet has been
+ * posted and its memory cannot be freed, so wait for the completion.
+ */
+ if (unlikely(rc))
+ goto err2;
+ }
+
+ return 0;
+
+err:
+ dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
+err2:
+ return rc;
+}
+
+static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats)
+{
+ bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev);
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ if (!cdev->ll2)
+ return -EINVAL;
+
+ rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n");
+ return rc;
+ }
+
+ /* In CMT mode, LL2 is always started on engine 0 for a storage PF */
+ if (b_is_storage_eng1) {
+ rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev),
+ cdev->ll2->handle, stats);
+ if (rc) {
+ DP_NOTICE(QED_LEADING_HWFN(cdev),
+ "Failed to get LL2 stats on engine 0\n");
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = &qed_ll2_start,
+ .stop = &qed_ll2_stop,
+ .start_xmit = &qed_ll2_start_xmit,
+ .register_cb_ops = &qed_ll2_register_cb_ops,
+ .get_stats = &qed_ll2_stats,
+};
+
+int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL);
+ return cdev->ll2 ? 0 : -ENOMEM;
+}
+
+void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+ kfree(cdev->ll2);
+ cdev->ll2 = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
new file mode 100644
index 000000000..a174c6fc6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -0,0 +1,260 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_LL2_H
+#define _QED_LL2_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_ll2_if.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
+/* LL2 queues handles will be split as follows:
+ * first will be legacy queues, and then the ctx based queues.
+ */
+#define QED_MAX_NUM_OF_LL2_CONNS_PF (4)
+#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3)
+
+#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \
+ (QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)
+
+#define QED_LL2_LEGACY_CONN_BASE_PF 0
+#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
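+
+/* With the values above, handles 0..2 of a PF are legacy connections and
+ * handle 3 is its single ctx-based connection.
+ */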
+
+struct qed_ll2_rx_packet {
+ struct list_head list_entry;
+ struct core_rx_bd_with_buff_len *rxq_bd;
+ dma_addr_t rx_buf_addr;
+ u16 buf_length;
+ void *cookie;
+ u8 placement_offset;
+ u16 parse_flags;
+ u16 packet_length;
+ u16 vlan;
+ u32 opaque_data[2];
+};
+
+struct qed_ll2_tx_packet {
+ struct list_head list_entry;
+ u16 bd_used;
+ bool notify_fw;
+ void *cookie;
+ /* Flexible Array of bds_set determined by max_bds_per_packet */
+ struct {
+ struct core_tx_bd *txq_bd;
+ dma_addr_t tx_frag;
+ u16 frag_len;
+ } bds_set[];
+};
+
+struct qed_ll2_rx_queue {
+ /* Lock protecting the Rx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain rxq_chain;
+ struct qed_chain rcq_chain;
+ u8 rx_sb_index;
+ u8 ctx_based;
+ bool b_cb_registered;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head posting_descq;
+ struct qed_ll2_rx_packet *descq_array;
+ void __iomem *set_prod_addr;
+ struct core_pwm_prod_update_data db_data;
+};
+
+struct qed_ll2_tx_queue {
+ /* Lock protecting the Tx queue manipulation */
+ spinlock_t lock;
+ struct qed_chain txq_chain;
+ u8 tx_sb_index;
+ bool b_cb_registered;
+ __le16 *p_fw_cons;
+ struct list_head active_descq;
+ struct list_head free_descq;
+ struct list_head sending_descq;
+ u16 cur_completing_bd_idx;
+ void __iomem *doorbell_addr;
+ struct core_db_data db_msg;
+ u16 bds_idx;
+ u16 cur_send_frag_num;
+ u16 cur_completing_frag_num;
+ bool b_completing_packet;
+ void *descq_mem; /* Memory for the variable-sized qed_ll2_tx_packet descriptors */
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+
+ struct qed_ll2_acquire_data_inputs input;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
+ enum core_tx_dest tx_dest;
+ u8 tx_stats_en;
+ bool main_func_queue;
+ struct qed_ll2_cbs cbs;
+ struct qed_ll2_rx_queue rx_queue;
+ struct qed_ll2_tx_queue tx_queue;
+};
+
+extern const struct qed_ll2_ops qed_ll2_ops_pass;
+
+/**
+ * qed_ll2_acquire_connection(): Allocate resources and start the Rx & Tx
+ * (if relevant) queue pair. Provides the
+ * connection handle as an output parameter.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @data: Describes connection parameters.
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
+
+/**
+ * qed_ll2_establish_connection(): Start a previously allocated LL2 queue pair.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
+
+/**
+ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ * @addr: Physical address of the Rx buffer to submit.
+ * @buf_len: Buffer length.
+ * @cookie: Cookie.
+ * @notify_fw: Produce corresponding Rx BD immediately.
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_post_rx_buffer(void *cxt,
+ u8 connection_handle,
+ dma_addr_t addr,
+ u16 buf_len, void *cookie, u8 notify_fw);
+
+/**
+ * qed_ll2_prepare_tx_packet(): Request a start Tx BD to prepare a Tx
+ * packet for submission to the FW.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: Connection handle.
+ * @pkt: Info regarding the tx packet.
+ * @notify_fw: Issue doorbell to fw for this packet.
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_prepare_tx_packet(void *cxt,
+ u8 connection_handle,
+ struct qed_ll2_tx_pkt_info *pkt,
+ bool notify_fw);
+
+/**
+ * qed_ll2_release_connection(): Releases resources allocated for LL2
+ * connection.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ *
+ * Return: Void.
+ */
+void qed_ll2_release_connection(void *cxt, u8 connection_handle);
+
+/**
+ * qed_ll2_set_fragment_of_tx_packet(): Provides a fragment to fill a Tx BD
+ * of the BDs requested by
+ * qed_ll2_prepare_tx_packet().
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ * @addr: Address.
+ * @nbytes: Number of bytes.
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_set_fragment_of_tx_packet(void *cxt,
+ u8 connection_handle,
+ dma_addr_t addr, u16 nbytes);
+
+/**
+ * qed_ll2_terminate_connection(): Stops Tx/Rx queues
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
+
+/**
+ * qed_ll2_get_stats(): Get LL2 queue's statistics
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection().
+ * @p_stats: Pointer to the statistics structure to fill.
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_get_stats(void *cxt,
+ u8 connection_handle, struct qed_ll2_stats *p_stats);
+
+/**
+ * qed_ll2_alloc(): Allocates LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: 0 on success, failure otherwise.
+ */
+int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_ll2_setup(): Inits LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ *
+ */
+void qed_ll2_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_ll2_free(): Releases LL2 connections set
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ *
+ */
+void qed_ll2_free(struct qed_hwfn *p_hwfn);
+
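+/* Illustrative call sequence for an LL2 client (a sketch only: error handling,
+ * locking and the actual buffer/packet setup are omitted):
+ *
+ *     struct qed_ll2_acquire_data data = { ... };
+ *     u8 handle;
+ *
+ *     data.p_connection_handle = &handle;
+ *     qed_ll2_acquire_connection(p_hwfn, &data);
+ *     qed_ll2_establish_connection(p_hwfn, handle);
+ *     qed_ll2_post_rx_buffer(p_hwfn, handle, phys_addr, buf_len, cookie, 1);
+ *     qed_ll2_prepare_tx_packet(p_hwfn, handle, &pkt, true);
+ *     ...
+ *     qed_ll2_terminate_connection(p_hwfn, handle);
+ *     qed_ll2_release_connection(p_hwfn, handle);
+ */
+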
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
new file mode 100644
index 000000000..25d9c2542
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -0,0 +1,3215 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/stddef.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include <net/devlink.h>
+#include <linux/aer.h>
+#include <linux/phylink.h>
+
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_sp.h"
+#include "qed_dev_api.h"
+#include "qed_ll2.h"
+#include "qed_fcoe.h"
+#include "qed_iscsi.h"
+
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_hw.h"
+#include "qed_selftest.h"
+#include "qed_debug.h"
+#include "qed_devlink.h"
+
+#define QED_ROCE_QPS (8192)
+#define QED_ROCE_DPIS (8)
+#define QED_RDMA_SRQS QED_ROCE_QPS
+#define QED_NVM_CFG_GET_FLAGS 0xA
+#define QED_NVM_CFG_GET_PF_FLAGS 0x1A
+#define QED_NVM_CFG_MAX_ATTRS 50
+
+static char version[] =
+ "QLogic FastLinQ 4xxxx Core Module qed\n";
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
+MODULE_LICENSE("GPL");
+
+#define FW_FILE_VERSION \
+ __stringify(FW_MAJOR_VERSION) "." \
+ __stringify(FW_MINOR_VERSION) "." \
+ __stringify(FW_REVISION_VERSION) "." \
+ __stringify(FW_ENGINEERING_VERSION)
+
+#define QED_FW_FILE_NAME \
+ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"
+
+MODULE_FIRMWARE(QED_FW_FILE_NAME);
+
+/* MFW speed capabilities maps */
+
+struct qed_mfw_speed_map {
+ u32 mfw_val;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define QED_MFW_SPEED_MAP(type, arr) \
+{ \
+ .mfw_val = (type), \
+ .cap_arr = (arr), \
+ .arr_size = ARRAY_SIZE(arr), \
+}
+
+static const u32 qed_mfw_ext_1g[] __initconst = {
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_10g[] __initconst = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_25g[] __initconst = {
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_40g[] __initconst = {
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
+};
+
+static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
+ qed_mfw_ext_50g_base_r),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
+ qed_mfw_ext_50g_base_r2),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
+ qed_mfw_ext_100g_base_r2),
+ QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
+ qed_mfw_ext_100g_base_r4),
+};
+
+static const u32 qed_mfw_legacy_1g[] __initconst = {
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_10g[] __initconst = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_20g[] __initconst = {
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_25g[] __initconst = {
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_40g[] __initconst = {
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_50g[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
+ qed_mfw_legacy_1g),
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
+ qed_mfw_legacy_10g),
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
+ qed_mfw_legacy_20g),
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
+ qed_mfw_legacy_25g),
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
+ qed_mfw_legacy_40g),
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
+ qed_mfw_legacy_50g),
+ QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
+ qed_mfw_legacy_bb_100g),
+};
+
+static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
+{
+ linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
+
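+ /* The source arrays are __initconst and are discarded after init, so
+ * drop the references once the linkmode mask has been built.
+ */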
+ map->cap_arr = NULL;
+ map->arr_size = 0;
+}
+
+static void __init qed_mfw_speed_maps_init(void)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
+ qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);
+
+ for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
+ qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
+}
+
+static int __init qed_init(void)
+{
+ pr_info("%s", version);
+
+ qed_mfw_speed_maps_init();
+
+ return 0;
+}
+module_init(qed_init);
+
+static void __exit qed_exit(void)
+{
+ /* To prevent marking this module as "permanent" */
+}
+module_exit(qed_exit);
+
+static void qed_free_pci(struct qed_dev *cdev)
+{
+ struct pci_dev *pdev = cdev->pdev;
+
+ pci_disable_pcie_error_reporting(pdev);
+
+ if (cdev->doorbells && cdev->db_size)
+ iounmap(cdev->doorbells);
+ if (cdev->regview)
+ iounmap(cdev->regview);
+ if (atomic_read(&pdev->enable_cnt) == 1)
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+}
+
+#define PCI_REVISION_ID_ERROR_VAL 0xff
+
+/* Performs PCI initialization as well as initializing PCI-related parameters
+ * in the device structure. Returns 0 in case of success.
+ */
+static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
+{
+ u8 rev_id;
+ int rc;
+
+ cdev->pdev = pdev;
+
+ rc = pci_enable_device(pdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Cannot enable PCI device\n");
+ goto err0;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #0\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ DP_NOTICE(cdev, "No memory region found in bar #2\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ if (atomic_read(&pdev->enable_cnt) == 1) {
+ rc = pci_request_regions(pdev, "qed");
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to request PCI memory resources\n");
+ goto err1;
+ }
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+ }
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
+ if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
+ DP_NOTICE(cdev,
+ "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
+ rev_id);
+ rc = -ENODEV;
+ goto err2;
+ }
+ if (!pci_is_pcie(pdev)) {
+ DP_NOTICE(cdev, "The bus is not PCI Express\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+ if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
+ DP_NOTICE(cdev, "Cannot find power management capability\n");
+
+ rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ DP_NOTICE(cdev, "Can't request DMA addresses\n");
+ rc = -EIO;
+ goto err2;
+ }
+
+ cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
+ cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
+ cdev->pci_params.irq = pdev->irq;
+
+ cdev->regview = pci_ioremap_bar(pdev, 0);
+ if (!cdev->regview) {
+ DP_NOTICE(cdev, "Cannot map register space, aborting\n");
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
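+ /* A missing doorbell BAR is fatal for a PF, but a VF can carry on
+ * without one.
+ */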
+ if (!cdev->db_size) {
+ if (IS_PF(cdev)) {
+ DP_NOTICE(cdev, "No Doorbell bar available\n");
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ }
+
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
+
+ /* AER (Advanced Error reporting) configuration */
+ rc = pci_enable_pcie_error_reporting(pdev);
+ if (rc)
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Failed to configure PCIe AER [%d]\n", rc);
+
+ return 0;
+
+err2:
+ pci_release_regions(pdev);
+err1:
+ pci_disable_device(pdev);
+err0:
+ return rc;
+}
+
+int qed_fill_dev_info(struct qed_dev *cdev,
+ struct qed_dev_info *dev_info)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_hw_info *hw_info = &p_hwfn->hw_info;
+ struct qed_tunnel_info *tun = &cdev->tunnel;
+ struct qed_ptt *ptt;
+
+ memset(dev_info, 0, sizeof(struct qed_dev_info));
+
+ if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->vxlan.b_mode_enabled)
+ dev_info->vxlan_enable = true;
+
+ if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
+ tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->gre_enable = true;
+
+ if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
+ tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
+ tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
+ dev_info->geneve_enable = true;
+
+ dev_info->num_hwfns = cdev->num_hwfns;
+ dev_info->pci_mem_start = cdev->pci_params.mem_start;
+ dev_info->pci_mem_end = cdev->pci_params.mem_end;
+ dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
+ dev_info->dev_type = cdev->type;
+ ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);
+
+ if (IS_PF(cdev)) {
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
+ &cdev->mf_bits);
+ if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
+ dev_info->b_arfs_capable = true;
+ dev_info->tx_switching = true;
+
+ if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
+ dev_info->wol_support = true;
+
+ dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
+ dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
+ dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
+ } else {
+ qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+ &dev_info->fw_minor, &dev_info->fw_rev,
+ &dev_info->fw_eng);
+ }
+
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mfw_rev, NULL);
+
+ qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mbi_version);
+
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
+
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+ } else {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+ &dev_info->mfw_rev, NULL);
+ }
+
+ dev_info->mtu = hw_info->mtu;
+ cdev->common_dev_info = *dev_info;
+
+ return 0;
+}
+
+static void qed_free_cdev(struct qed_dev *cdev)
+{
+ kfree((void *)cdev);
+}
+
+static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
+{
+ struct qed_dev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return cdev;
+
+ qed_init_struct(cdev);
+
+ return cdev;
+}
+
+/* Sets the requested power state */
+static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
+ return 0;
+}
+
+/* probing */
+static struct qed_dev *qed_probe(struct pci_dev *pdev,
+ struct qed_probe_params *params)
+{
+ struct qed_dev *cdev;
+ int rc;
+
+ cdev = qed_alloc_cdev(pdev);
+ if (!cdev)
+ goto err0;
+
+ cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
+ cdev->protocol = params->protocol;
+
+ if (params->is_vf)
+ cdev->b_is_vf = true;
+
+ qed_init_dp(cdev, params->dp_module, params->dp_level);
+
+ cdev->recov_in_prog = params->recov_in_prog;
+
+ rc = qed_init_pci(cdev, pdev);
+ if (rc) {
+ DP_ERR(cdev, "init pci failed\n");
+ goto err1;
+ }
+ DP_INFO(cdev, "PCI init completed successfully\n");
+
+ rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
+ if (rc) {
+ DP_ERR(cdev, "hw prepare failed\n");
+ goto err2;
+ }
+
+ DP_INFO(cdev, "%s completed successfully\n", __func__);
+
+ return cdev;
+
+err2:
+ qed_free_pci(cdev);
+err1:
+ qed_free_cdev(cdev);
+err0:
+ return NULL;
+}
+
+static void qed_remove(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return;
+
+ qed_hw_remove(cdev);
+
+ qed_free_pci(cdev);
+
+ qed_set_power_state(cdev, PCI_D3hot);
+
+ qed_free_cdev(cdev);
+}
+
+static void qed_disable_msix(struct qed_dev *cdev)
+{
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ pci_disable_msix(cdev->pdev);
+ kfree(cdev->int_params.msix_table);
+ } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
+ pci_disable_msi(cdev->pdev);
+ }
+
+ memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
+}
+
+static int qed_enable_msix(struct qed_dev *cdev,
+ struct qed_int_params *int_params)
+{
+ int i, rc, cnt;
+
+ cnt = int_params->in.num_vectors;
+
+ for (i = 0; i < cnt; i++)
+ int_params->msix_table[i].entry = i;
+
+ rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
+ int_params->in.min_msix_cnt, cnt);
+ if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
+ (rc % cdev->num_hwfns)) {
+ pci_disable_msix(cdev->pdev);
+
+ /* If fastpath is initialized, we need at least one interrupt
+ * per hwfn [and the slow path interrupts]. New requested number
+ * should be a multiple of the number of hwfns.
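+ * E.g. with two hwfns and rc == 5, cnt is rounded down to 4.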
+ */
+ cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
+ DP_NOTICE(cdev,
+ "Trying to enable MSI-X with less vectors (%d out of %d)\n",
+ cnt, int_params->in.num_vectors);
+ rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
+ cnt);
+ if (!rc)
+ rc = cnt;
+ }
+
+ /* For VFs, we should return with an error in case we didn't get the
+ * exact number of msix vectors as we requested.
+ * Not doing that will lead to a crash when starting queues for
+ * this VF.
+ */
+ if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
+ /* MSI-x configuration was achieved */
+ int_params->out.int_mode = QED_INT_MODE_MSIX;
+ int_params->out.num_vectors = rc;
+ rc = 0;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
+ cnt, rc);
+ }
+
+ return rc;
+}
+
+/* This function outputs the int mode and the number of enabled MSI-X vectors */
+static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
+{
+ struct qed_int_params *int_params = &cdev->int_params;
+ struct msix_entry *tbl;
+ int rc = 0, cnt;
+
+ switch (int_params->in.int_mode) {
+ case QED_INT_MODE_MSIX:
+ /* Allocate MSIX table */
+ cnt = int_params->in.num_vectors;
+ int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
+ if (!int_params->msix_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Enable MSIX */
+ rc = qed_enable_msix(cdev, int_params);
+ if (!rc)
+ goto out;
+
+ DP_NOTICE(cdev, "Failed to enable MSI-X\n");
+ kfree(int_params->msix_table);
+ if (force_mode)
+ goto out;
+ fallthrough;
+
+ case QED_INT_MODE_MSI:
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
+
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
+ fallthrough;
+
+ case QED_INT_MODE_INTA:
+ int_params->out.int_mode = QED_INT_MODE_INTA;
+ rc = 0;
+ goto out;
+ default:
+ DP_NOTICE(cdev, "Unknown int_mode value %d\n",
+ int_params->in.int_mode);
+ rc = -EINVAL;
+ }
+
+out:
+ if (!rc)
+ DP_INFO(cdev, "Using %s interrupts\n",
+ int_params->out.int_mode == QED_INT_MODE_INTA ?
+ "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
+ "MSI" : "MSIX");
+ cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;
+
+ return rc;
+}
+
+static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
+ int index, void(*handler)(void *))
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ hwfn->simd_proto_handler[relative_idx].func = handler;
+ hwfn->simd_proto_handler[relative_idx].token = token;
+}
+
+static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
+ int relative_idx = index / cdev->num_hwfns;
+
+ memset(&hwfn->simd_proto_handler[relative_idx], 0,
+ sizeof(struct qed_simd_fp_handler));
+}
+
+static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
+{
+ tasklet_schedule((struct tasklet_struct *)tasklet);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qed_single_int(int irq, void *dev_instance)
+{
+ struct qed_dev *cdev = (struct qed_dev *)dev_instance;
+ struct qed_hwfn *hwfn;
+ irqreturn_t rc = IRQ_NONE;
+ u64 status;
+ int i, j;
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);
+
+ if (!status)
+ continue;
+
+ hwfn = &cdev->hwfns[i];
+
+ /* Slowpath interrupt */
+ if (unlikely(status & 0x1)) {
+ tasklet_schedule(&hwfn->sp_dpc);
+ status &= ~0x1;
+ rc = IRQ_HANDLED;
+ }
+
+ /* Fastpath interrupts */
+ for (j = 0; j < 64; j++) {
+ if ((0x2ULL << j) & status) {
+ struct qed_simd_fp_handler *p_handler =
+ &hwfn->simd_proto_handler[j];
+
+ if (p_handler->func)
+ p_handler->func(p_handler->token);
+ else
+ DP_NOTICE(hwfn,
+ "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+ j, status);
+
+ status &= ~(0x2ULL << j);
+ rc = IRQ_HANDLED;
+ }
+ }
+
+ if (unlikely(status))
+ DP_VERBOSE(hwfn, NETIF_MSG_INTR,
+ "got an unknown interrupt status 0x%llx\n",
+ status);
+ }
+
+ return rc;
+}
+
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
+{
+ struct qed_dev *cdev = hwfn->cdev;
+ u32 int_mode;
+ int rc = 0;
+ u8 id;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX) {
+ id = hwfn->my_id;
+ snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+ id, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ rc = request_irq(cdev->int_params.msix_table[id].vector,
+ qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
+ } else {
+ unsigned long flags = 0;
+
+ snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
+ cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
+ PCI_FUNC(cdev->pdev->devfn));
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
+ flags |= IRQF_SHARED;
+
+ rc = request_irq(cdev->pdev->irq, qed_single_int,
+ flags, cdev->name, cdev);
+ }
+
+ if (rc)
+ DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
+ else
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
+ "Requested slowpath %s\n",
+ (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");
+
+ return rc;
+}
+
+static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
+{
+ /* Calling the disable function will make sure that any
+ * currently-running function is completed. The following call to the
+ * enable function makes this sequence a flush-like operation.
+ */
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(&p_hwfn->sp_dpc);
+ tasklet_enable(&p_hwfn->sp_dpc);
+ }
+}
+
+void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 id = p_hwfn->my_id;
+ u32 int_mode;
+
+ int_mode = cdev->int_params.out.int_mode;
+ if (int_mode == QED_INT_MODE_MSIX)
+ synchronize_irq(cdev->int_params.msix_table[id].vector);
+ else
+ synchronize_irq(cdev->pdev->irq);
+
+ qed_slowpath_tasklet_flush(p_hwfn);
+}
+
+static void qed_slowpath_irq_free(struct qed_dev *cdev)
+{
+ int i;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].b_int_requested)
+ break;
+ free_irq(cdev->int_params.msix_table[i].vector,
+ &cdev->hwfns[i].sp_dpc);
+ }
+ } else {
+ if (QED_LEADING_HWFN(cdev)->b_int_requested)
+ free_irq(cdev->pdev->irq, cdev);
+ }
+ qed_int_disable_post_isr_release(cdev);
+}
+
+static int qed_nic_stop(struct qed_dev *cdev)
+{
+ int i, rc;
+
+ rc = qed_hw_stop(cdev);
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (p_hwfn->b_sp_dpc_enabled) {
+ tasklet_disable(&p_hwfn->sp_dpc);
+ p_hwfn->b_sp_dpc_enabled = false;
+ DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
+ "Disabled sp tasklet [hwfn %d] at %p\n",
+ i, &p_hwfn->sp_dpc);
+ }
+ }
+
+ qed_dbg_pf_exit(cdev);
+
+ return rc;
+}
+
+static int qed_nic_setup(struct qed_dev *cdev)
+{
+ int rc, i;
+
+ /* Determine if interface is going to require LL2 */
+ if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->using_ll2 = true;
+ }
+ }
+
+ rc = qed_resc_alloc(cdev);
+ if (rc)
+ return rc;
+
+ DP_INFO(cdev, "Allocated qed resources\n");
+
+ qed_resc_setup(cdev);
+
+ return rc;
+}
+
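+/* Clamp the number of fastpath interrupts requested by the protocol driver
+ * to what the current interrupt mode can provide.
+ */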
+static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
+ limit = cdev->num_hwfns * 63;
+ else if (cdev->int_params.fp_msix_cnt)
+ limit = cdev->int_params.fp_msix_cnt;
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(struct qed_int_info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ /* Need to expose only MSI-X information; Single IRQ is handled solely
+ * by qed.
+ */
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.fp_msix_base;
+
+ info->msix_cnt = cdev->int_params.fp_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+ }
+
+ return 0;
+}
+
+static int qed_slowpath_setup_int(struct qed_dev *cdev,
+ enum qed_int_mode int_mode)
+{
+ struct qed_sb_cnt_info sb_cnt_info;
+ int num_l2_queues = 0;
+ int rc;
+ int i;
+
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ cdev->int_params.in.int_mode = int_mode;
+ for_each_hwfn(cdev, i) {
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
+ cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
+ cdev->int_params.in.num_vectors++; /* slowpath */
+ }
+
+ /* We want a minimum of one slowpath and one fastpath vector per hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+
+ if (is_kdump_kernel()) {
+ DP_INFO(cdev,
+ "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+ cdev->int_params.in.min_msix_cnt);
+ cdev->int_params.in.num_vectors =
+ cdev->int_params.in.min_msix_cnt;
+ }
+
+ rc = qed_set_int_mode(cdev, false);
+ if (rc) {
+ DP_ERR(cdev, "%s ERR\n", __func__);
+ return rc;
+ }
+
+ cdev->int_params.fp_msix_base = cdev->num_hwfns;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
+ cdev->num_hwfns;
+
+ if (!IS_ENABLED(CONFIG_QED_RDMA) ||
+ !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
+ return 0;
+
+ for_each_hwfn(cdev, i)
+ num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA,
+ "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
+ cdev->int_params.fp_msix_cnt, num_l2_queues);
+
+ if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
+ cdev->int_params.rdma_msix_cnt =
+ (cdev->int_params.fp_msix_cnt - num_l2_queues)
+ / cdev->num_hwfns;
+ cdev->int_params.rdma_msix_base =
+ cdev->int_params.fp_msix_base + num_l2_queues;
+ cdev->int_params.fp_msix_cnt = num_l2_queues;
+ } else {
+ cdev->int_params.rdma_msix_cnt = 0;
+ }
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
+ cdev->int_params.rdma_msix_cnt,
+ cdev->int_params.rdma_msix_base);
+
+ return 0;
+}
+
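+/* VFs are limited to MSI-X; the vector count follows the Rx queues granted
+ * by the PF, summed over both engines on CMT devices.
+ */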
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+ int rc;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+ &cdev->int_params.in.num_vectors);
+ if (cdev->num_hwfns > 1) {
+ u8 vectors = 0;
+
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+ cdev->int_params.in.num_vectors += vectors;
+ }
+
+ /* We want a minimum of one fastpath vector per vf hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+ rc = qed_set_int_mode(cdev, true);
+ if (rc)
+ return rc;
+
+ cdev->int_params.fp_msix_base = 0;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+ return 0;
+}
+
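+/* Decompress a zlib-deflated firmware section into unzip_buf. Returns the
+ * number of dwords written, or 0 on any zlib error.
+ */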
+u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
+ u8 *input_buf, u32 max_size, u8 *unzip_buf)
+{
+ int rc;
+
+ p_hwfn->stream->next_in = input_buf;
+ p_hwfn->stream->avail_in = input_len;
+ p_hwfn->stream->next_out = unzip_buf;
+ p_hwfn->stream->avail_out = max_size;
+
+ rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);
+
+ if (rc != Z_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
+ rc);
+ return 0;
+ }
+
+ rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
+ zlib_inflateEnd(p_hwfn->stream);
+
+ if (rc != Z_OK && rc != Z_STREAM_END) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
+ p_hwfn->stream->msg, rc);
+ return 0;
+ }
+
+ return p_hwfn->stream->total_out / 4;
+}
+
+static int qed_alloc_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+ void *workspace;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
+ if (!p_hwfn->stream)
+ return -ENOMEM;
+
+ workspace = vzalloc(zlib_inflate_workspacesize());
+ if (!workspace)
+ return -ENOMEM;
+ p_hwfn->stream->workspace = workspace;
+ }
+
+ return 0;
+}
+
+static void qed_free_stream_mem(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!p_hwfn->stream)
+ return;
+
+ vfree(p_hwfn->stream->workspace);
+ kfree(p_hwfn->stream);
+ }
+}
+
+static void qed_update_pf_params(struct qed_dev *cdev,
+ struct qed_pf_params *params)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_QED_RDMA)) {
+ params->rdma_pf_params.num_qps = QED_ROCE_QPS;
+ params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
+ params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
+ /* divide by 3 the MRs to avoid MF ILT overflow */
+ params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
+ }
+
+ if (cdev->num_hwfns > 1 || IS_VF(cdev))
+ params->eth_pf_params.num_arfs_filters = 0;
+
+ /* In case we might support RDMA, don't allow qede to be greedy
+ * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
+ * per hwfn.
+ */
+ if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
+ u16 *num_cons;
+
+ num_cons = &params->eth_pf_params.num_cons;
+ *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
+ }
+
+ for (i = 0; i < cdev->num_hwfns; i++) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ p_hwfn->pf_params = *params;
+ }
+}
+
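+/* Once triggered, periodic doorbell recovery re-runs every 100ms, up to
+ * QED_PERIODIC_DB_REC_COUNT times.
+ */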
+#define QED_PERIODIC_DB_REC_COUNT 10
+#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
+#define QED_PERIODIC_DB_REC_INTERVAL \
+ msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
+
+static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
+ enum qed_slowpath_wq_flag wq_flag,
+ unsigned long delay)
+{
+ if (!hwfn->slowpath_wq_active)
+ return -EINVAL;
+
+ /* Memory barrier for setting atomic bit */
+ smp_mb__before_atomic();
+ set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
+
+ return 0;
+}
+
+void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
+{
+ /* Reset periodic Doorbell Recovery counter */
+ p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;
+
+ /* Don't schedule periodic Doorbell Recovery if already scheduled */
+ if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &p_hwfn->slowpath_task_flags))
+ return;
+
+ qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
+ QED_PERIODIC_DB_REC_INTERVAL);
+}
+
+static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+{
+ int i;
+
+ if (IS_VF(cdev))
+ return;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].slowpath_wq)
+ continue;
+
+ /* Stop queuing new delayed works */
+ cdev->hwfns[i].slowpath_wq_active = false;
+
+ cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
+ destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+ }
+}
+
+static void qed_slowpath_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ slowpath_task.work);
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ if (hwfn->slowpath_wq_active)
+ queue_delayed_work(hwfn->slowpath_wq,
+ &hwfn->slowpath_task, 0);
+
+ return;
+ }
+
+ if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
+ &hwfn->slowpath_task_flags))
+ qed_mfw_process_tlv_req(hwfn, ptt);
+
+ if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
+ &hwfn->slowpath_task_flags)) {
+ /* skip qed_db_rec_handler during recovery/unload */
+ if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
+ goto out;
+
+ qed_db_rec_handler(hwfn, ptt);
+ if (hwfn->periodic_db_rec_count--)
+ qed_slowpath_delayed_work(hwfn,
+ QED_SLOWPATH_PERIODIC_DB_REC,
+ QED_PERIODIC_DB_REC_INTERVAL);
+ }
+
+out:
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_slowpath_wq_start(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ char name[NAME_SIZE];
+ int i;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+
+ snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+
+ hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+ if (!hwfn->slowpath_wq) {
+ DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+ hwfn->slowpath_wq_active = true;
+ }
+
+ return 0;
+}
+
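+/* Slowpath bring-up: start the IOV and slowpath workqueues, request the
+ * firmware image, allocate resources, configure interrupts, run HW init and,
+ * for PFs, report the driver version to the MFW.
+ */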
+static int qed_slowpath_start(struct qed_dev *cdev,
+ struct qed_slowpath_params *params)
+{
+ struct qed_drv_load_params drv_load_params;
+ struct qed_hw_init_params hw_init_params;
+ struct qed_mcp_drv_version drv_version;
+ struct qed_tunnel_info tunn_info;
+ const u8 *data = NULL;
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = -EINVAL;
+
+ if (qed_iov_wq_start(cdev))
+ goto err;
+
+ if (qed_slowpath_wq_start(cdev))
+ goto err;
+
+ if (IS_PF(cdev)) {
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
+
+ if (cdev->num_hwfns == 1) {
+ p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (p_ptt) {
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
+ } else {
+ DP_NOTICE(cdev,
+ "Failed to acquire PTT for aRFS\n");
+ rc = -EINVAL;
+ goto err;
+ }
+ }
+ }
+
+ cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
+ rc = qed_nic_setup(cdev);
+ if (rc)
+ goto err;
+
+ if (IS_PF(cdev))
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ else
+ rc = qed_slowpath_vf_setup_int(cdev);
+ if (rc)
+ goto err1;
+
+ if (IS_PF(cdev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc)
+ goto err2;
+
+ /* First Dword used to differentiate between various sources */
+ data = cdev->firmware->data + sizeof(u32);
+
+ qed_dbg_pf_init(cdev);
+ }
+
+ /* Start the slowpath */
+ memset(&hw_init_params, 0, sizeof(hw_init_params));
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.vxlan.b_mode_enabled = true;
+ tunn_info.l2_gre.b_mode_enabled = true;
+ tunn_info.ip_gre.b_mode_enabled = true;
+ tunn_info.l2_geneve.b_mode_enabled = true;
+ tunn_info.ip_geneve.b_mode_enabled = true;
+ tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
+ hw_init_params.p_tunn = &tunn_info;
+ hw_init_params.b_hw_start = true;
+ hw_init_params.int_mode = cdev->int_params.out.int_mode;
+ hw_init_params.allow_npar_tx_switch = true;
+ hw_init_params.bin_fw_data = data;
+
+ memset(&drv_load_params, 0, sizeof(drv_load_params));
+ drv_load_params.is_crash_kernel = is_kdump_kernel();
+ drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
+ drv_load_params.avoid_eng_reset = false;
+ drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
+ hw_init_params.p_drv_load_params = &drv_load_params;
+
+ rc = qed_hw_init(cdev, &hw_init_params);
+ if (rc)
+ goto err2;
+
+ DP_INFO(cdev,
+ "HW initialization and function start completed successfully\n");
+
+ if (IS_PF(cdev)) {
+ cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
+ BIT(QED_MODE_L2GENEVE_TUNN) |
+ BIT(QED_MODE_IPGENEVE_TUNN) |
+ BIT(QED_MODE_L2GRE_TUNN) |
+ BIT(QED_MODE_IPGRE_TUNN));
+ }
+
+ /* Allocate LL2 interface if needed */
+ if (QED_LEADING_HWFN(cdev)->using_ll2) {
+ rc = qed_ll2_alloc_if(cdev);
+ if (rc)
+ goto err3;
+ }
+ if (IS_PF(cdev)) {
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strscpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ goto err4;
+ }
+ }
+
+ qed_reset_vport_stats(cdev);
+
+ return 0;
+
+err4:
+ qed_ll2_dealloc_if(cdev);
+err3:
+ qed_hw_stop(cdev);
+err2:
+ qed_hw_timers_stop_all(cdev);
+ if (IS_PF(cdev))
+ qed_slowpath_irq_free(cdev);
+ qed_free_stream_mem(cdev);
+ qed_disable_msix(cdev);
+err1:
+ qed_resc_free(cdev);
+err:
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
+
+ if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+
+ qed_iov_wq_stop(cdev, false);
+
+ qed_slowpath_wq_stop(cdev);
+
+ return rc;
+}
+
+static int qed_slowpath_stop(struct qed_dev *cdev)
+{
+ if (!cdev)
+ return -ENODEV;
+
+ qed_slowpath_wq_stop(cdev);
+
+ qed_ll2_dealloc_if(cdev);
+
+ if (IS_PF(cdev)) {
+ if (cdev->num_hwfns == 1)
+ qed_ptt_release(QED_LEADING_HWFN(cdev),
+ QED_LEADING_HWFN(cdev)->p_arfs_ptt);
+ qed_free_stream_mem(cdev);
+ if (IS_QED_ETH_IF(cdev))
+ qed_sriov_disable(cdev, true);
+ }
+
+ qed_nic_stop(cdev);
+
+ if (IS_PF(cdev))
+ qed_slowpath_irq_free(cdev);
+
+ qed_disable_msix(cdev);
+
+ qed_resc_free(cdev);
+
+ qed_iov_wq_stop(cdev, true);
+
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
+
+ return 0;
+}
+
+static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
+{
+ int i;
+
+ memcpy(cdev->name, name, NAME_SIZE);
+ for_each_hwfn(cdev, i)
+ snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
+}
+
+static u32 qed_sb_init(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr, u16 sb_id,
+ enum qed_sb_type type)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ u16 rel_sb_id;
+ u32 rc;
+
+ /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
+ if (type == QED_SB_TYPE_L2_QUEUE) {
+ p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+ } else {
+ p_hwfn = QED_AFFIN_HWFN(cdev);
+ rel_sb_id = sb_id;
+ }
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
+
+ if (IS_PF(p_hwfn->cdev)) {
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
+ sb_phy_addr, rel_sb_id);
+ qed_ptt_release(p_hwfn, p_ptt);
+ } else {
+ rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
+ sb_phy_addr, rel_sb_id);
+ }
+
+ return rc;
+}
+
+static u32 qed_sb_release(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 rel_sb_id;
+ u32 rc;
+
+ /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
+ if (type == QED_SB_TYPE_L2_QUEUE) {
+ p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
+ rel_sb_id = sb_id / cdev->num_hwfns;
+ } else {
+ p_hwfn = QED_AFFIN_HWFN(cdev);
+ rel_sb_id = sb_id;
+ }
+
+ DP_VERBOSE(cdev, NETIF_MSG_INTR,
+ "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
+ IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);
+
+ rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);
+
+ return rc;
+}
+
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+ return true;
+}
+
+static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
+ const struct qed_link_params *params)
+{
+ struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
+ const struct qed_mfw_speed_map *map;
+ u32 i;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ ext_speed->autoneg = !!params->autoneg;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+ ext_speed->advertised_speeds = 0;
+
+ for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
+ map = qed_mfw_ext_maps + i;
+
+ if (linkmode_intersects(params->adv_speeds, map->caps))
+ ext_speed->advertised_speeds |= map->mfw_val;
+ }
+ }
+
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
+ switch (params->forced_speed) {
+ case SPEED_1000:
+ ext_speed->forced_speed = QED_EXT_SPEED_1G;
+ break;
+ case SPEED_10000:
+ ext_speed->forced_speed = QED_EXT_SPEED_10G;
+ break;
+ case SPEED_20000:
+ ext_speed->forced_speed = QED_EXT_SPEED_20G;
+ break;
+ case SPEED_25000:
+ ext_speed->forced_speed = QED_EXT_SPEED_25G;
+ break;
+ case SPEED_40000:
+ ext_speed->forced_speed = QED_EXT_SPEED_40G;
+ break;
+ case SPEED_50000:
+ ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
+ QED_EXT_SPEED_50G_R2;
+ break;
+ case SPEED_100000:
+ ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
+ QED_EXT_SPEED_100G_R4 |
+ QED_EXT_SPEED_100G_P4;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
+ return;
+
+ switch (params->forced_speed) {
+ case SPEED_25000:
+ switch (params->fec) {
+ case FEC_FORCE_MODE_NONE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
+ break;
+ case FEC_FORCE_MODE_FIRECODE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
+ break;
+ case FEC_FORCE_MODE_RS:
+ link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
+ break;
+ case FEC_FORCE_MODE_AUTO:
+ link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
+ ETH_EXT_FEC_25G_BASE_R |
+ ETH_EXT_FEC_25G_NONE;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case SPEED_40000:
+ switch (params->fec) {
+ case FEC_FORCE_MODE_NONE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
+ break;
+ case FEC_FORCE_MODE_FIRECODE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
+ break;
+ case FEC_FORCE_MODE_AUTO:
+ link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
+ ETH_EXT_FEC_40G_NONE;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case SPEED_50000:
+ switch (params->fec) {
+ case FEC_FORCE_MODE_NONE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
+ break;
+ case FEC_FORCE_MODE_FIRECODE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
+ break;
+ case FEC_FORCE_MODE_RS:
+ link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
+ break;
+ case FEC_FORCE_MODE_AUTO:
+ link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
+ ETH_EXT_FEC_50G_BASE_R |
+ ETH_EXT_FEC_50G_NONE;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case SPEED_100000:
+ switch (params->fec) {
+ case FEC_FORCE_MODE_NONE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
+ break;
+ case FEC_FORCE_MODE_FIRECODE:
+ link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
+ break;
+ case FEC_FORCE_MODE_RS:
+ link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
+ break;
+ case FEC_FORCE_MODE_AUTO:
+ link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
+ ETH_EXT_FEC_100G_BASE_R |
+ ETH_EXT_FEC_100G_NONE;
+ break;
+ default:
+ break;
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
+{
+ struct qed_mcp_link_params *link_params;
+ struct qed_mcp_link_speed_params *speed;
+ const struct qed_mfw_speed_map *map;
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int rc;
+ u32 i;
+
+ if (!cdev)
+ return -ENODEV;
+
+ /* The link should be set only once per PF */
+ hwfn = &cdev->hwfns[0];
+
+	/* When a VF wants to set the link, force it to read the bulletin
+	 * instead. This mimics the PF behavior, where a notification [both
+	 * immediate and possibly later] would be generated when changing
+	 * properties.
+	 */
+ if (IS_VF(cdev)) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
+ return 0;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ link_params = qed_mcp_get_link_params(hwfn);
+	if (!link_params) {
+		qed_ptt_release(hwfn, ptt);
+		return -ENODATA;
+	}
+
+ speed = &link_params->speed;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
+ speed->autoneg = !!params->autoneg;
+
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
+ speed->advertised_speeds = 0;
+
+ for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
+ map = qed_mfw_legacy_maps + i;
+
+ if (linkmode_intersects(params->adv_speeds, map->caps))
+ speed->advertised_speeds |= map->mfw_val;
+ }
+ }
+
+ if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
+ speed->forced_speed = params->forced_speed;
+
+ if (qed_mcp_is_ext_speed_supported(hwfn))
+ qed_set_ext_speed_params(link_params, params);
+
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+
+ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+ switch (params->loopback_mode) {
+ case QED_LINK_LOOPBACK_INT_PHY:
+ link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT_PHY:
+ link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT:
+ link_params->loopback_mode = ETH_LOOPBACK_EXT;
+ break;
+ case QED_LINK_LOOPBACK_MAC:
+ link_params->loopback_mode = ETH_LOOPBACK_MAC;
+ break;
+ case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
+ link_params->loopback_mode =
+ ETH_LOOPBACK_CNIG_AH_ONLY_0123;
+ break;
+ case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
+ link_params->loopback_mode =
+ ETH_LOOPBACK_CNIG_AH_ONLY_2301;
+ break;
+ case QED_LINK_LOOPBACK_PCS_AH_ONLY:
+ link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
+ break;
+ case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
+ link_params->loopback_mode =
+ ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
+ break;
+ case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
+ link_params->loopback_mode =
+ ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
+ break;
+ default:
+ link_params->loopback_mode = ETH_LOOPBACK_NONE;
+ break;
+ }
+ }
+
+ if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
+ memcpy(&link_params->eee, &params->eee,
+ sizeof(link_params->eee));
+
+ if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
+ link_params->fec = params->fec;
+
+ rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_get_port_type(u32 media_type)
+{
+ int port_type;
+
+ switch (media_type) {
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
+ port_type = PORT_FIBRE;
+ break;
+ case MEDIA_DA_TWINAX:
+ port_type = PORT_DA;
+ break;
+ case MEDIA_BASE_T:
+ port_type = PORT_TP;
+ break;
+ case MEDIA_KR:
+ case MEDIA_NOT_PRESENT:
+ port_type = PORT_NONE;
+ break;
+ case MEDIA_UNSPECIFIED:
+ default:
+ port_type = PORT_OTHER;
+ break;
+ }
+ return port_type;
+}
+
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
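+/* Translate the MFW speed-capability mask, together with the reported media
+ * and transceiver type, into ethtool link-mode bits.
+ */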
+static void qed_fill_link_capability(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt, u32 capability,
+ unsigned long *if_caps)
+{
+ u32 media_type, tcvr_state, tcvr_type;
+ u32 speed_mask, board_cfg;
+
+ if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
+ media_type = MEDIA_UNSPECIFIED;
+
+ if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
+ tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
+
+ if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
+ speed_mask = 0xFFFFFFFF;
+
+ if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
+ board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
+ media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
+
+ switch (media_type) {
+ case MEDIA_DA_TWINAX:
+ phylink_set(if_caps, FIBRE);
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ phylink_set(if_caps, 20000baseKR2_Full);
+
+ /* For DAC media multiple speed capabilities are supported */
+ capability |= speed_mask;
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ phylink_set(if_caps, 1000baseKX_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ phylink_set(if_caps, 10000baseCR_Full);
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ phylink_set(if_caps, 40000baseCR4_Full);
+ break;
+ default:
+ break;
+ }
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ phylink_set(if_caps, 25000baseCR_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ phylink_set(if_caps, 50000baseCR2_Full);
+
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ phylink_set(if_caps, 100000baseCR4_Full);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case MEDIA_BASE_T:
+ phylink_set(if_caps, TP);
+
+ if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ phylink_set(if_caps, 1000baseT_Full);
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ phylink_set(if_caps, 10000baseT_Full);
+ }
+
+ if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
+ phylink_set(if_caps, FIBRE);
+
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ phylink_set(if_caps, 1000baseT_Full);
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ phylink_set(if_caps, 10000baseT_Full);
+ break;
+ default:
+ break;
+ }
+ }
+
+ break;
+ case MEDIA_SFP_1G_FIBER:
+ case MEDIA_SFPP_10G_FIBER:
+ case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
+ phylink_set(if_caps, FIBRE);
+ capability |= speed_mask;
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+ phylink_set(if_caps, 1000baseKX_Full);
+ break;
+ default:
+ break;
+ }
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+ phylink_set(if_caps, 10000baseSR_Full);
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+ phylink_set(if_caps, 10000baseLR_Full);
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ phylink_set(if_caps, 10000baseLRM_Full);
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ phylink_set(if_caps, 10000baseR_FEC);
+ break;
+ default:
+ break;
+ }
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ phylink_set(if_caps, 20000baseKR2_Full);
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+ phylink_set(if_caps, 25000baseSR_Full);
+ break;
+ default:
+ break;
+ }
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ phylink_set(if_caps, 40000baseLR4_Full);
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ phylink_set(if_caps, 40000baseSR4_Full);
+ break;
+ default:
+ break;
+ }
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ phylink_set(if_caps, 50000baseKR2_Full);
+
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ switch (tcvr_type) {
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ phylink_set(if_caps, 100000baseSR4_Full);
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ phylink_set(if_caps, 100000baseLR4_ER4_Full);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case MEDIA_KR:
+ phylink_set(if_caps, Backplane);
+
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
+ phylink_set(if_caps, 20000baseKR2_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
+ phylink_set(if_caps, 1000baseKX_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
+ phylink_set(if_caps, 10000baseKR_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
+ phylink_set(if_caps, 25000baseKR_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
+ phylink_set(if_caps, 40000baseKR4_Full);
+ if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
+ phylink_set(if_caps, 50000baseKR2_Full);
+ if (capability &
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
+ phylink_set(if_caps, 100000baseKR4_Full);
+
+ break;
+ case MEDIA_UNSPECIFIED:
+ case MEDIA_NOT_PRESENT:
+ default:
+ DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
+ "Unknown media and transceiver type;\n");
+ break;
+ }
+}
+
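+/* Convert link-partner advertisement flags into an NVM-style speed mask so
+ * that qed_fill_link_capability() can reuse the same translation.
+ */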
+static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
+{
+ *speed_mask = 0;
+
+ if (caps &
+ (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ if (caps & QED_LINK_PARTNER_SPEED_10G)
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ if (caps & QED_LINK_PARTNER_SPEED_20G)
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
+ if (caps & QED_LINK_PARTNER_SPEED_25G)
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ if (caps & QED_LINK_PARTNER_SPEED_40G)
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ if (caps & QED_LINK_PARTNER_SPEED_50G)
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
+ if (caps & QED_LINK_PARTNER_SPEED_100G)
+ *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
+}
+
+static void qed_fill_link(struct qed_hwfn *hwfn,
+ struct qed_ptt *ptt,
+ struct qed_link_output *if_link)
+{
+ struct qed_mcp_link_capabilities link_caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ u32 media_type, speed_mask;
+
+ memset(if_link, 0, sizeof(*if_link));
+
+ /* Prepare source inputs */
+ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
+ }
+
+ /* Set the link parameters to pass to protocol driver */
+ if (link.link_up)
+ if_link->link_up = true;
+
+ if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
+ if (link_caps.default_ext_autoneg)
+ phylink_set(if_link->supported_caps, Autoneg);
+
+ linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
+
+ if (params.ext_speed.autoneg)
+ phylink_set(if_link->advertised_caps, Autoneg);
+ else
+ phylink_clear(if_link->advertised_caps, Autoneg);
+
+ qed_fill_link_capability(hwfn, ptt,
+ params.ext_speed.advertised_speeds,
+ if_link->advertised_caps);
+ } else {
+ if (link_caps.default_speed_autoneg)
+ phylink_set(if_link->supported_caps, Autoneg);
+
+ linkmode_copy(if_link->advertised_caps, if_link->supported_caps);
+
+ if (params.speed.autoneg)
+ phylink_set(if_link->advertised_caps, Autoneg);
+ else
+ phylink_clear(if_link->advertised_caps, Autoneg);
+ }
+
+ if (params.pause.autoneg ||
+ (params.pause.forced_rx && params.pause.forced_tx))
+ phylink_set(if_link->supported_caps, Asym_Pause);
+ if (params.pause.autoneg || params.pause.forced_rx ||
+ params.pause.forced_tx)
+ phylink_set(if_link->supported_caps, Pause);
+
+ if_link->sup_fec = link_caps.fec_default;
+ if_link->active_fec = params.fec;
+
+ /* Fill link advertised capability */
+ qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
+ if_link->advertised_caps);
+
+ /* Fill link supported capability */
+ qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
+ if_link->supported_caps);
+
+ /* Fill partner advertised capability */
+ qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
+ qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);
+
+ if (link.link_up)
+ if_link->speed = link.speed;
+
+ /* TODO - fill duplex properly */
+ if_link->duplex = DUPLEX_FULL;
+ qed_mcp_get_media_type(hwfn, ptt, &media_type);
+ if_link->port = qed_get_port_type(media_type);
+
+ if_link->autoneg = params.speed.autoneg;
+
+ if (params.pause.autoneg)
+ if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ if (params.pause.forced_rx)
+ if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+ if (params.pause.forced_tx)
+ if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ if (link.an_complete)
+ phylink_set(if_link->lp_caps, Autoneg);
+ if (link.partner_adv_pause)
+ phylink_set(if_link->lp_caps, Pause);
+ if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
+ link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
+ phylink_set(if_link->lp_caps, Asym_Pause);
+
+ if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
+ if_link->eee_supported = false;
+ } else {
+ if_link->eee_supported = true;
+ if_link->eee_active = link.eee_active;
+ if_link->sup_caps = link_caps.eee_speed_caps;
+ /* MFW clears adv_caps on eee disable; use configured value */
+ if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
+ params.eee.adv_caps;
+ if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
+ if_link->eee.enable = params.eee.enable;
+ if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
+ if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
+ }
+}
+
+static void qed_get_current_link(struct qed_dev *cdev,
+ struct qed_link_output *if_link)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i;
+
+ hwfn = &cdev->hwfns[0];
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(hwfn);
+ if (ptt) {
+ qed_fill_link(hwfn, ptt, if_link);
+ qed_ptt_release(hwfn, ptt);
+ } else {
+ DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
+ }
+ } else {
+ qed_fill_link(hwfn, NULL, if_link);
+ }
+
+ for_each_hwfn(cdev, i)
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+}
+
+void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+ struct qed_link_output if_link;
+
+ qed_fill_link(hwfn, ptt, &if_link);
+ qed_inform_vf_link_state(hwfn);
+
+ if (IS_LEAD_HWFN(hwfn) && cookie)
+ op->link_update(cookie, &if_link);
+}
+
+void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
+{
+ void *cookie = hwfn->cdev->ops_cookie;
+ struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
+
+ if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
+ op->bw_update(cookie);
+}
+
+static int qed_drain(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i, rc;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ for_each_hwfn(cdev, i) {
+ hwfn = &cdev->hwfns[i];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_drain(hwfn, ptt);
+ qed_ptt_release(hwfn, ptt);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
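+/* Read the NVM image into a temporary buffer, convert it to big-endian and
+ * compute the CRC32 over everything except the trailing 4 CRC bytes.
+ */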
+static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
+ struct qed_nvm_image_att *nvm_image,
+ u32 *crc)
+{
+ u8 *buf = NULL;
+ int rc;
+
+ /* Allocate a buffer for holding the nvram image */
+ buf = kzalloc(nvm_image->length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Read image into buffer */
+ rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
+ buf, nvm_image->length);
+ if (rc) {
+ DP_ERR(cdev, "Failed reading image from nvm\n");
+ goto out;
+ }
+
+ /* Convert the buffer into big-endian format (excluding the
+ * closing 4 bytes of CRC).
+ */
+ cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
+ DIV_ROUND_UP(nvm_image->length - 4, 4));
+
+ /* Calc CRC for the "actual" image buffer, i.e. not including
+ * the last 4 CRC bytes.
+ */
+ *crc = ~crc32(~0U, buf, nvm_image->length - 4);
+ *crc = (__force u32)cpu_to_be32p(crc);
+
+out:
+ kfree(buf);
+
+ return rc;
+}
+
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x4 [command index] |
+ * 4B | image_type | Options | Number of register settings |
+ * 8B | Value |
+ * 12B | Mask |
+ * 16B | Offset |
+ * \----------------------------------------------------------------------/
+ * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
+ * Options - 0'b - Calculate & Update CRC for image
+ */
+static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
+ bool *check_resp)
+{
+ struct qed_nvm_image_att nvm_image;
+ struct qed_hwfn *p_hwfn;
+ bool is_crc = false;
+ u32 image_type;
+ int rc = 0, i;
+ u16 len;
+
+ *data += 4;
+ image_type = **data;
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+ if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
+ break;
+ if (i == p_hwfn->nvm_info.num_images) {
+ DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
+ image_type);
+ return -ENOENT;
+ }
+
+ nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
+ nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
+ **data, image_type, nvm_image.start_addr,
+ nvm_image.start_addr + nvm_image.length - 1);
+ (*data)++;
+ is_crc = !!(**data & BIT(0));
+ (*data)++;
+ len = *((u16 *)*data);
+ *data += 2;
+ if (is_crc) {
+ u32 crc = 0;
+
+ rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
+ if (rc) {
+ DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
+ goto exit;
+ }
+
+ rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
+ (nvm_image.start_addr +
+ nvm_image.length - 4), (u8 *)&crc, 4);
+ if (rc)
+ DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
+ nvm_image.start_addr + nvm_image.length - 4, rc);
+ goto exit;
+ }
+
+ /* Iterate over the values for setting */
+ while (len) {
+ u32 offset, mask, value, cur_value;
+ u8 buf[4];
+
+ value = *((u32 *)*data);
+ *data += 4;
+ mask = *((u32 *)*data);
+ *data += 4;
+ offset = *((u32 *)*data);
+ *data += 4;
+
+ rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
+ 4);
+ if (rc) {
+ DP_ERR(cdev, "Failed reading from %08x\n",
+ nvm_image.start_addr + offset);
+ goto exit;
+ }
+
+ cur_value = le32_to_cpu(*((__le32 *)buf));
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
+ nvm_image.start_addr + offset, cur_value,
+ (cur_value & ~mask) | (value & mask), value, mask);
+ value = (value & mask) | (cur_value & ~mask);
+ rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
+ nvm_image.start_addr + offset,
+ (u8 *)&value, 4);
+ if (rc) {
+ DP_ERR(cdev, "Failed writing to %08x\n",
+ nvm_image.start_addr + offset);
+ goto exit;
+ }
+
+ len--;
+ }
+exit:
+ return rc;
+}
+
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x3 [command index] |
+ * 4B | b'0: check_response? | b'1-31 reserved |
+ * 8B | File-type | reserved |
+ * 12B | Image length in bytes |
+ * \----------------------------------------------------------------------/
+ * Start a new file of the provided type
+ */
+static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
+ const u8 **data, bool *check_resp)
+{
+ u32 file_type, file_size = 0;
+ int rc;
+
+ *data += 4;
+ *check_resp = !!(**data & BIT(0));
+ *data += 4;
+ file_type = **data;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "About to start a new file of type %02x\n", file_type);
+ if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
+ *data += 4;
+ file_size = *((u32 *)(*data));
+ }
+
+ rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
+ (u8 *)(&file_size), 4);
+ *data += 4;
+
+ return rc;
+}
+
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x2 [command index] |
+ * 4B | Length in bytes |
+ * 8B | b'0: check_response? | b'1-31 reserved |
+ * 12B | Offset in bytes |
+ * 16B | Data ... |
+ * \----------------------------------------------------------------------/
+ * Write data as part of a file that was previously started. Data should be
+ * of length equal to that provided in the message
+ */
+static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
+ const u8 **data, bool *check_resp)
+{
+ u32 offset, len;
+ int rc;
+
+ *data += 4;
+ len = *((u32 *)(*data));
+ *data += 4;
+ *check_resp = !!(**data & BIT(0));
+ *data += 4;
+ offset = *((u32 *)(*data));
+ *data += 4;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "About to write File-data: %08x bytes to offset %08x\n",
+ len, offset);
+
+ rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
+ (char *)(*data), len);
+ *data += len;
+
+ return rc;
+}
+
+/* Binary file format [General header] -
+ * /----------------------------------------------------------------------\
+ * 0B | QED_NVM_SIGNATURE |
+ * 4B | Length in bytes |
+ * 8B | Highest command in this batchfile | Reserved |
+ * \----------------------------------------------------------------------/
+ */
+static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
+ const struct firmware *image,
+ const u8 **data)
+{
+ u32 signature, len;
+
+ /* Check minimum size */
+ if (image->size < 12) {
+ DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
+ return -EINVAL;
+ }
+
+ /* Check signature */
+ signature = *((u32 *)(*data));
+ if (signature != QED_NVM_SIGNATURE) {
+ DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
+ return -EINVAL;
+ }
+
+ *data += 4;
+ /* Validate internal size equals the image-size */
+ len = *((u32 *)(*data));
+ if (len != image->size) {
+ DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
+ len, (u32)image->size);
+ return -EINVAL;
+ }
+
+ *data += 4;
+	/* Make sure the driver is familiar with all commands necessary here */
+ if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
+ DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
+ *((u16 *)(*data)));
+ return -EINVAL;
+ }
+
+ *data += 4;
+
+ return 0;
+}
+
+/* Binary file format -
+ * /----------------------------------------------------------------------\
+ * 0B | 0x5 [command index] |
+ * 4B | Number of config attributes | Reserved |
+ * 4B | Config ID | Entity ID | Length |
+ * 4B | Value |
+ * | |
+ * \----------------------------------------------------------------------/
+ * There can be several cfg_id-entity_id-Length-Value sets as specified by
+ * 'Number of config attributes'.
+ *
+ * The API parses config attributes from the user-provided buffer and flashes
+ * them to the respective NVM path using the Management FW interface.
+ */
+static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 entity_id, len, buf[32];
+ bool need_nvm_init = true;
+ struct qed_ptt *ptt;
+ u16 cfg_id, count;
+ int rc = 0, i;
+ u32 flags;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ /* NVM CFG ID attribute header */
+ *data += 4;
+ count = *((u16 *)*data);
+ *data += 4;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config ids: num_attrs = %0d\n", count);
+ /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
+ * arithmetic operations in the implementation.
+ */
+ for (i = 1; i <= count; i++) {
+ cfg_id = *((u16 *)*data);
+ *data += 2;
+ entity_id = **data;
+ (*data)++;
+ len = **data;
+ (*data)++;
+ memcpy(buf, *data, len);
+ *data += len;
+
+ flags = 0;
+ if (need_nvm_init) {
+ flags |= QED_NVM_CFG_OPTION_INIT;
+ need_nvm_init = false;
+ }
+
+ /* Commit to flash and free the resources */
+ if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
+ flags |= QED_NVM_CFG_OPTION_COMMIT |
+ QED_NVM_CFG_OPTION_FREE;
+ need_nvm_init = true;
+ }
+
+ if (entity_id)
+ flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "cfg_id = %d entity = %d len = %d\n", cfg_id,
+ entity_id, len);
+ rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
+ buf, len);
+ if (rc) {
+ DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
+ break;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+#define QED_MAX_NVM_BUF_LEN 32
+static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ u8 buf[QED_MAX_NVM_BUF_LEN];
+ struct qed_ptt *ptt;
+ u32 len;
+ int rc;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return QED_MAX_NVM_BUF_LEN;
+
+ rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
+ &len);
+ if (rc || !len) {
+ DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
+ len = QED_MAX_NVM_BUF_LEN;
+ }
+
+ qed_ptt_release(hwfn, ptt);
+
+ return len;
+}
+
+static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
+ u32 cmd, u32 entity_id)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ u32 flags, len;
+ int rc = 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Read config cmd = %d entity id %d\n", cmd, entity_id);
+ flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
+ rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
+ if (rc)
+ DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
+{
+ const struct firmware *image;
+ const u8 *data, *data_end;
+ u32 cmd_type;
+ int rc;
+
+ rc = request_firmware(&image, name, &cdev->pdev->dev);
+ if (rc) {
+ DP_ERR(cdev, "Failed to find '%s'\n", name);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, NETIF_MSG_DRV,
+ "Flashing '%s' - firmware's data at %p, size is %08x\n",
+ name, image->data, (u32)image->size);
+ data = image->data;
+ data_end = data + image->size;
+
+ rc = qed_nvm_flash_image_validate(cdev, image, &data);
+ if (rc)
+ goto exit;
+
+ while (data < data_end) {
+ bool check_resp = false;
+
+ /* Parse the actual command */
+ cmd_type = *((u32 *)data);
+ switch (cmd_type) {
+ case QED_NVM_FLASH_CMD_FILE_DATA:
+ rc = qed_nvm_flash_image_file_data(cdev, &data,
+ &check_resp);
+ break;
+ case QED_NVM_FLASH_CMD_FILE_START:
+ rc = qed_nvm_flash_image_file_start(cdev, &data,
+ &check_resp);
+ break;
+ case QED_NVM_FLASH_CMD_NVM_CHANGE:
+ rc = qed_nvm_flash_image_access(cdev, &data,
+ &check_resp);
+ break;
+ case QED_NVM_FLASH_CMD_NVM_CFG_ID:
+ rc = qed_nvm_flash_cfg_write(cdev, &data);
+ break;
+ default:
+ DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (rc) {
+ DP_ERR(cdev, "Command %08x failed\n", cmd_type);
+ goto exit;
+ }
+
+ /* Check response if needed */
+ if (check_resp) {
+ u32 mcp_response = 0;
+
+ if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
+ DP_ERR(cdev, "Failed getting MCP response\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ switch (mcp_response & FW_MSG_CODE_MASK) {
+ case FW_MSG_CODE_OK:
+ case FW_MSG_CODE_NVM_OK:
+ case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
+ case FW_MSG_CODE_PHY_OK:
+ break;
+ default:
+ DP_ERR(cdev, "MFW returns error: %08x\n",
+ mcp_response);
+ rc = -EINVAL;
+ goto exit;
+ }
+ }
+ }
+
+exit:
+ release_firmware(image);
+
+ return rc;
+}
+
+static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
+ u8 *buf, u16 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+
+ return qed_mcp_get_nvm_image(hwfn, type, buf, len);
+}
+
+void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+
+ if (ops && ops->schedule_recovery_handler)
+ ops->schedule_recovery_handler(cookie);
+}
+
+static const char * const qed_hw_err_type_descr[] = {
+ [QED_HW_ERR_FAN_FAIL] = "Fan Failure",
+ [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
+ [QED_HW_ERR_HW_ATTN] = "HW Attention",
+ [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
+ [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
+ [QED_HW_ERR_FW_ASSERT] = "FW Assertion",
+ [QED_HW_ERR_LAST] = "Unknown",
+};
+
+void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
+ enum qed_hw_err_type err_type)
+{
+ struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ const char *err_str;
+
+ if (err_type > QED_HW_ERR_LAST)
+ err_type = QED_HW_ERR_LAST;
+ err_str = qed_hw_err_type_descr[err_type];
+
+ DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);
+
+ /* Call the HW error handler of the protocol driver.
+	 * If it is not available, fall back to minimal handling that prevents
+	 * HW attentions from being reasserted.
+ */
+ if (ops && ops->schedule_hw_err_handler)
+ ops->schedule_hw_err_handler(cookie, err_type);
+ else
+ qed_int_attn_clr_enable(p_hwfn->cdev, true);
+}
+
+static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+ void *handle)
+{
+ return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
+}
+
+static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_set_led(hwfn, ptt, mode);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+int qed_recovery_process(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EAGAIN;
+
+ rc = qed_start_recovery_process(p_hwfn, p_ptt);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static int qed_update_wol(struct qed_dev *cdev, bool enabled)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
+ : QED_OV_WOL_DISABLED);
+ if (rc)
+ goto out;
+ rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
+static int qed_update_drv_state(struct qed_dev *cdev, bool active)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
+ QED_OV_DRIVER_STATE_ACTIVE :
+ QED_OV_DRIVER_STATE_DISABLED);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return status;
+}
+
+static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
+ if (status)
+ goto out;
+
+ status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+ qed_ptt_release(hwfn, ptt);
+ return status;
+}
+
+static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int status = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
+ if (status)
+ goto out;
+
+ status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
+
+out:
+ qed_ptt_release(hwfn, ptt);
+ return status;
+}
+
+static int
+qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
+ u16 qid, struct qed_sb_info_dbg *sb_dbg)
+{
+ struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
+ struct qed_ptt *ptt;
+ int rc;
+
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Can't acquire PTT\n");
+ return -EAGAIN;
+ }
+
+ memset(sb_dbg, 0, sizeof(*sb_dbg));
+ rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);
+
+ qed_ptt_release(hwfn, ptt);
+ return rc;
+}
+
+static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
+ u8 dev_addr, u32 offset, u32 len)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
+ offset, len, buf);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_dbg_grc_config(hwfn, cfg_id, val);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
+{
+ char buf[QED_MFW_REPORT_STR_SIZE];
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ va_list vl;
+
+ va_start(vl, fmt);
+ vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
+ va_end(vl);
+
+ if (IS_PF(cdev)) {
+ p_hwfn = QED_LEADING_HWFN(cdev);
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (p_ptt) {
+ qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf));
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+ }
+}
+
+static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
+{
+ return QED_AFFIN_HWFN_IDX(cdev);
+}
+
+static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *ptt;
+ int rc = 0;
+
+ *esl_active = false;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static struct qed_selftest_ops qed_selftest_ops_pass = {
+ .selftest_memory = &qed_selftest_memory,
+ .selftest_interrupt = &qed_selftest_interrupt,
+ .selftest_register = &qed_selftest_register,
+ .selftest_clock = &qed_selftest_clock,
+ .selftest_nvram = &qed_selftest_nvram,
+};
+
+const struct qed_common_ops qed_common_ops_pass = {
+ .selftest = &qed_selftest_ops_pass,
+ .probe = &qed_probe,
+ .remove = &qed_remove,
+ .set_power_state = &qed_set_power_state,
+ .set_name = &qed_set_name,
+ .update_pf_params = &qed_update_pf_params,
+ .slowpath_start = &qed_slowpath_start,
+ .slowpath_stop = &qed_slowpath_stop,
+ .set_fp_int = &qed_set_int_fp,
+ .get_fp_int = &qed_get_int_fp,
+ .sb_init = &qed_sb_init,
+ .sb_release = &qed_sb_release,
+ .simd_handler_config = &qed_simd_handler_config,
+ .simd_handler_clean = &qed_simd_handler_clean,
+ .dbg_grc = &qed_dbg_grc,
+ .dbg_grc_size = &qed_dbg_grc_size,
+ .can_link_change = &qed_can_link_change,
+ .set_link = &qed_set_link,
+ .get_link = &qed_get_current_link,
+ .drain = &qed_drain,
+ .update_msglvl = &qed_init_dp,
+ .devlink_register = qed_devlink_register,
+ .devlink_unregister = qed_devlink_unregister,
+ .report_fatal_error = qed_report_fatal_error,
+ .dbg_all_data = &qed_dbg_all_data,
+ .dbg_all_data_size = &qed_dbg_all_data_size,
+ .chain_alloc = &qed_chain_alloc,
+ .chain_free = &qed_chain_free,
+ .nvm_flash = &qed_nvm_flash,
+ .nvm_get_image = &qed_nvm_get_image,
+ .set_coalesce = &qed_set_coalesce,
+ .set_led = &qed_set_led,
+ .recovery_process = &qed_recovery_process,
+ .recovery_prolog = &qed_recovery_prolog,
+ .attn_clr_enable = &qed_int_attn_clr_enable,
+ .update_drv_state = &qed_update_drv_state,
+ .update_mac = &qed_update_mac,
+ .update_mtu = &qed_update_mtu,
+ .update_wol = &qed_update_wol,
+ .db_recovery_add = &qed_db_recovery_add,
+ .db_recovery_del = &qed_db_recovery_del,
+ .read_module_eeprom = &qed_read_module_eeprom,
+ .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
+ .read_nvm_cfg = &qed_nvm_flash_cfg_read,
+ .read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
+ .set_grc_config = &qed_set_grc_config,
+ .mfw_report = &qed_mfw_report,
+ .get_sb_info = &qed_get_sb_info,
+ .get_esl_status = &qed_get_esl_status,
+};
+
+void qed_get_protocol_stats(struct qed_dev *cdev,
+ enum qed_mcp_protocol_type type,
+ union qed_mcp_protocol_stats *stats)
+{
+ struct qed_eth_stats eth_stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ switch (type) {
+ case QED_MCP_LAN_STATS:
+ qed_get_vport_stats_context(cdev, &eth_stats, true);
+ stats->lan_stats.ucast_rx_pkts =
+ eth_stats.common.rx_ucast_pkts;
+ stats->lan_stats.ucast_tx_pkts =
+ eth_stats.common.tx_ucast_pkts;
+ stats->lan_stats.fcs_err = -1;
+ break;
+ case QED_MCP_FCOE_STATS:
+ qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
+ break;
+ case QED_MCP_ISCSI_STATS:
+ qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
+ break;
+ default:
+ DP_VERBOSE(cdev, QED_MSG_SP,
+ "Invalid protocol type = %d\n", type);
+ return;
+ }
+}
+
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
+{
+ DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+ "Scheduling slowpath task [Flag: %d]\n",
+ QED_SLOWPATH_MFW_TLV_REQ);
+ /* Memory barrier for setting atomic bit */
+ smp_mb__before_atomic();
+ set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
+ smp_mb__after_atomic();
+ queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+
+ return 0;
+}
+
+static void
+qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
+{
+ struct qed_common_cb_ops *op = cdev->protocol_ops.common;
+ struct qed_eth_stats_common *p_common;
+ struct qed_generic_tlvs gen_tlvs;
+ struct qed_eth_stats stats;
+ int i;
+
+ memset(&gen_tlvs, 0, sizeof(gen_tlvs));
+ op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
+
+ if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
+ tlv->flags.ipv4_csum_offload = true;
+ if (gen_tlvs.feat_flags & QED_TLV_LSO)
+ tlv->flags.lso_supported = true;
+ tlv->flags.b_set = true;
+
+ for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
+ if (is_valid_ether_addr(gen_tlvs.mac[i])) {
+ ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
+ tlv->mac_set[i] = true;
+ }
+ }
+
+ qed_get_vport_stats(cdev, &stats);
+ p_common = &stats.common;
+ tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ tlv->rx_frames_set = true;
+ tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ tlv->rx_bytes_set = true;
+ tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
+ tlv->tx_frames_set = true;
+ tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
+ tlv->tx_bytes_set = true;
+}
+
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
+ union qed_mfw_tlv_data *tlv_buf)
+{
+ struct qed_dev *cdev = hwfn->cdev;
+ struct qed_common_cb_ops *ops;
+
+ ops = cdev->protocol_ops.common;
+ if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
+ DP_NOTICE(hwfn, "Can't collect TLV management info\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case QED_MFW_TLV_GENERIC:
+ qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
+ break;
+ case QED_MFW_TLV_ETH:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
+ break;
+ case QED_MFW_TLV_FCOE:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
+ break;
+ case QED_MFW_TLV_ISCSI:
+ ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+unsigned long qed_get_epoch_time(void)
+{
+ return ktime_get_real_seconds();
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
new file mode 100644
index 000000000..16e6bd466
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -0,0 +1,4244 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_mfw_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
+#define GRCBASE_MCP 0xe00000
+
+#define QED_MCP_RESP_ITER_US 10
+
+#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
+#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
+
+#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
+ _val)
+
+#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))
+
+#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
+ DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field), _val)
+
+#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
+ DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
+ offsetof(struct public_drv_mb, _field))
+
+#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
+ DRV_ID_PDA_COMP_VER_SHIFT)
+
+#define MCP_BYTES_PER_MBIT_SHIFT 17
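+
+/* For illustration only (a sketch of how the accessors above expand, not an
+ * additional interface): a call such as
+ *
+ *	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, val);
+ *
+ * resolves through DRV_INNER_WR() to
+ *
+ *	qed_wr(p_hwfn, p_ptt,
+ *	       p_hwfn->mcp_info->drv_mb_addr +
+ *	       offsetof(struct public_drv_mb, drv_mb_param), val);
+ *
+ * i.e. every driver-mailbox field access is a GRC access at the mailbox base
+ * address plus the field offset within struct public_drv_mb.
+ */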
+
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
+ return false;
+ return true;
+}
+
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PORT);
+ u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
+
+ p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
+ MFW_PORT(p_hwfn));
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "port_addr = 0x%x, port_id 0x%02x\n",
+ p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
+}
+
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
+ u32 tmp, i;
+
+ if (!p_hwfn->mcp_info->public_base)
+ return;
+
+ for (i = 0; i < length; i++) {
+ tmp = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->mfw_mb_addr +
+ (i << 2) + sizeof(u32));
+
+ /* The MB data is actually BE; Need to force it to cpu */
+ ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
+ be32_to_cpu((__force __be32)tmp);
+ }
+}
+
+struct qed_mcp_cmd_elem {
+ struct list_head list;
+ struct qed_mcp_mb_params *p_mb_params;
+ u16 expected_seq_num;
+ bool b_is_completed;
+};
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *
+qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 expected_seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
+ if (!p_cmd_elem)
+ goto out;
+
+ p_cmd_elem->p_mb_params = p_mb_params;
+ p_cmd_elem->expected_seq_num = expected_seq_num;
+ list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
+out:
+ return p_cmd_elem;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_cmd_elem *p_cmd_elem)
+{
+ list_del(&p_cmd_elem->list);
+ kfree(p_cmd_elem);
+}
+
+/* Must be called while cmd_lock is acquired */
+static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
+ u16 seq_num)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL;
+
+ list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
+ if (p_cmd_elem->expected_seq_num == seq_num)
+ return p_cmd_elem;
+ }
+
+ return NULL;
+}
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->mcp_info) {
+ struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp;
+
+ kfree(p_hwfn->mcp_info->mfw_mb_cur);
+ kfree(p_hwfn->mcp_info->mfw_mb_shadow);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ list_for_each_entry_safe(p_cmd_elem,
+ p_tmp,
+ &p_hwfn->mcp_info->cmd_list, list) {
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ }
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ }
+
+ kfree(p_hwfn->mcp_info);
+ p_hwfn->mcp_info = NULL;
+
+ return 0;
+}
+
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define QED_MCP_SHMEM_RDY_ITER_MS 50
+
+static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
+ u32 drv_mb_offsize, mfw_mb_offsize;
+ u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
+
+ p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn,
+ "The address of the MCP scratch-pad is not configured\n");
+ return -EINVAL;
+ }
+
+ p_info->public_base |= GRCBASE_MCP;
+
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb,
+ sup_msgs));
+
+ /* The driver can notify the MFW of an MCP reset and then read the SHMEM
+ * values before the MFW has completed re-initializing them. To avoid this
+ * race, the "sup_msgs" field in the MFW mailbox is used as a data ready
+ * indication.
+ */
+ while (!p_info->mfw_mb_length && --cnt) {
+ msleep(msec);
+ p_info->mfw_mb_length =
+ (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb, sup_msgs));
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return -EBUSY;
+ }
+
+ /* Calculate the driver and MFW mailbox address */
+ drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_DRV_MB));
+ p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
+ drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
+
+ /* Get the current driver mailbox sequence before sending
+ * the first command
+ */
+ p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK;
+
+ /* Get current FW pulse sequence */
+ p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK;
+
+ p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ return 0;
+}
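+
+/* A note on the wait budget used above: the SHMEM-ready poll runs for at most
+ * QED_MCP_SHMEM_RDY_MAX_RETRIES (20) iterations of QED_MCP_SHMEM_RDY_ITER_MS
+ * (50) msec each, i.e. roughly 20 * 50 = 1000 msec, which matches the
+ * "maximum of 1 sec" stated next to those definitions.
+ */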
+
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *p_info;
+ u32 size;
+
+ /* Allocate mcp_info structure */
+ p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
+ if (!p_hwfn->mcp_info)
+ goto err;
+ p_info = p_hwfn->mcp_info;
+
+ /* Initialize the MFW spinlock */
+ spin_lock_init(&p_info->cmd_lock);
+ spin_lock_init(&p_info->link_lock);
+ spin_lock_init(&p_info->unload_lock);
+
+ INIT_LIST_HEAD(&p_info->cmd_list);
+
+ if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
+ DP_NOTICE(p_hwfn, "MCP is not initialized\n");
+ /* Do not free mcp_info here, since public_base indicates that
+ * the MCP is not initialized
+ */
+ return 0;
+ }
+
+ size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
+ p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
+ p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
+ if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
+ goto err;
+
+ return 0;
+
+err:
+ qed_mcp_free(p_hwfn);
+ return -ENOMEM;
+}
+
+static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Use MCP history register to check if MCP reset occurred between init
+ * time and now.
+ */
+ if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
+ p_hwfn->mcp_info->mcp_hist, generic_por_0);
+
+ qed_load_mcp_offsets(p_hwfn, p_ptt);
+ qed_mcp_cmd_port_init(p_hwfn, p_ptt);
+ }
+}
+
+int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
+ int rc = 0;
+
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
+ return -EBUSY;
+ }
+
+ /* Ensure that only a single thread is accessing the mailbox */
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
+
+ /* Set drv command along with the updated sequence */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq = ++p_hwfn->mcp_info->drv_mb_seq;
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));
+
+ do {
+ /* Wait for MFW response */
+ udelay(delay);
+ /* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
+ } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
+ MISCS_REG_GENERIC_POR_0)) &&
+ (cnt++ < QED_MCP_RESET_RETRIES));
+
+ if (org_mcp_reset_seq !=
+ qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "MCP was reset after %d usec\n", cnt * delay);
+ } else {
+ DP_ERR(p_hwfn, "Failed to reset MCP\n");
+ rc = -EAGAIN;
+ }
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ return rc;
+}
+
+/* Must be called while cmd_lock is acquired */
+static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
+{
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+
+ /* There is at most one pending command at any given time, and if it
+ * exists, it is placed at the head of the list.
+ */
+ if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
+ p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
+ struct qed_mcp_cmd_elem, list);
+ return !p_cmd_elem->b_is_completed;
+ }
+
+ return false;
+}
+
+/* Must be called while cmd_lock is acquired */
+static int
+qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params *p_mb_params;
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u32 mcp_resp;
+ u16 seq_num;
+
+ mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
+ seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);
+
+ /* Return if no new unhandled response has been received */
+ if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
+ return -EAGAIN;
+
+ p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
+ if (!p_cmd_elem) {
+ DP_ERR(p_hwfn,
+ "Failed to find a pending mailbox cmd that expects sequence number %d\n",
+ seq_num);
+ return -EINVAL;
+ }
+
+ p_mb_params = p_cmd_elem->p_mb_params;
+
+ /* Get the MFW response along with the sequence number */
+ p_mb_params->mcp_resp = mcp_resp;
+
+ /* Get the MFW param */
+ p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
+
+ /* Get the union data */
+ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
+ u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb,
+ union_data);
+ qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
+ union_data_addr, p_mb_params->data_dst_size);
+ }
+
+ p_cmd_elem->b_is_completed = true;
+
+ return 0;
+}
+
+/* Must be called while cmd_lock is acquired */
+static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u16 seq_num)
+{
+ union drv_union_data union_data;
+ u32 union_data_addr;
+
+ /* Set the union data */
+ union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
+ offsetof(struct public_drv_mb, union_data);
+ memset(&union_data, 0, sizeof(union_data));
+ if (p_mb_params->p_data_src && p_mb_params->data_src_size)
+ memcpy(&union_data, p_mb_params->p_data_src,
+ p_mb_params->data_src_size);
+ qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
+ sizeof(union_data));
+
+ /* Set the drv param */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);
+
+ /* Set the drv command along with the sequence number */
+ DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "MFW mailbox: command 0x%08x param 0x%08x\n",
+ (p_mb_params->cmd | seq_num), p_mb_params->param);
+}
+
+static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
+{
+ p_hwfn->mcp_info->b_block_cmd = block_cmd;
+
+ DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
+ block_cmd ? "Block" : "Unblock");
+}
+
+static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
+ u32 delay = QED_MCP_RESP_ITER_US;
+
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ udelay(delay);
+ cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+ udelay(delay);
+ cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
+
+ DP_NOTICE(p_hwfn,
+ "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
+ cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
+}
+
+static int
+_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params,
+ u32 max_retries, u32 usecs)
+{
+ u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
+ struct qed_mcp_cmd_elem *p_cmd_elem;
+ u16 seq_num;
+ int rc = 0;
+
+ /* Wait until the mailbox is non-occupied */
+ do {
+ /* Exit the loop if there is no pending command, or if the
+ * pending command is completed during this iteration.
+ * The spinlock stays locked until the command is sent.
+ */
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (!qed_mcp_has_pending_cmd(p_hwfn))
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+ msleep(msecs);
+ else
+ udelay(usecs);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EAGAIN;
+ }
+
+ /* Send the mailbox command */
+ qed_mcp_reread_offsets(p_hwfn, p_ptt);
+ seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
+ p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
+ if (!p_cmd_elem) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ /* Wait for the MFW response */
+ do {
+ /* Exit the loop if the command is already completed, or if the
+ * command is completed during this iteration.
+ * The spinlock stays locked until the list element is removed.
+ */
+
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
+ msleep(msecs);
+ else
+ udelay(usecs);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (p_cmd_elem->b_is_completed)
+ break;
+
+ rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
+ if (!rc)
+ break;
+ else if (rc != -EAGAIN)
+ goto err;
+
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ } while (++cnt < max_retries);
+
+ if (cnt >= max_retries) {
+ DP_NOTICE(p_hwfn,
+ "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ qed_mcp_print_cpu_info(p_hwfn, p_ptt);
+
+ spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+ qed_hw_err_notify(p_hwfn, p_ptt,
+ QED_HW_ERR_MFW_RESP_FAIL, NULL);
+ return -EAGAIN;
+ }
+
+ qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
+ p_mb_params->mcp_resp,
+ p_mb_params->mcp_param,
+ (cnt * usecs) / 1000, (cnt * usecs) % 1000);
+
+ /* Clear the sequence number from the MFW response */
+ p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;
+
+ return 0;
+
+err:
+ spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
+ return rc;
+}
+
+static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_mb_params *p_mb_params)
+{
+ size_t union_data_size = sizeof(union drv_union_data);
+ u32 max_retries = QED_DRV_MB_MAX_RETRIES;
+ u32 usecs = QED_MCP_RESP_ITER_US;
+
+ /* MCP not initialized */
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ if (p_hwfn->mcp_info->b_block_cmd) {
+ DP_NOTICE(p_hwfn,
+ "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
+ p_mb_params->cmd, p_mb_params->param);
+ return -EBUSY;
+ }
+
+ if (p_mb_params->data_src_size > union_data_size ||
+ p_mb_params->data_dst_size > union_data_size) {
+ DP_ERR(p_hwfn,
+ "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
+ p_mb_params->data_src_size,
+ p_mb_params->data_dst_size, union_data_size);
+ return -EINVAL;
+ }
+
+ if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
+ max_retries = DIV_ROUND_UP(max_retries, 1000);
+ usecs *= 1000;
+ }
+
+ return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
+ usecs);
+}
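+
+/* The CAN_SLEEP conversion above keeps the overall wait budget unchanged: the
+ * default profile is QED_DRV_MB_MAX_RETRIES (500 * 1000) polls of
+ * QED_MCP_RESP_ITER_US (10) usec, i.e. ~5 sec of busy-waiting, while the
+ * sleeping profile becomes 500 iterations of 10 msec, the same ~5 sec but
+ * using msleep() instead of udelay().
+ */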
+
+static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ bool can_sleep)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0;
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ return 0;
+}
+
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, true));
+}
+
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param)
+{
+ return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
+ o_mcp_resp, o_mcp_param, false));
+}
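+
+/* A minimal caller sketch for the two wrappers above (illustrative only;
+ * DRV_MSG_CODE_EXAMPLE stands in for a real mailbox command):
+ *
+ *	u32 resp = 0, param = 0;
+ *	int rc;
+ *
+ *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_EXAMPLE, 0, &resp, &param);
+ *	if (rc)
+ *		return rc;
+ *
+ * On success, resp holds the FW_MSG_CODE_* response and param the response
+ * parameter. qed_mcp_cmd() may msleep() between polls and therefore must not
+ * be called from atomic context; qed_mcp_cmd_nosleep() busy-waits instead.
+ */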
+
+static int
+qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_src = i_buf;
+ mb_params.data_src_size = (u8)i_txn_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ /* nvm_info needs to be updated */
+ p_hwfn->nvm_info.valid = false;
+
+ return 0;
+}
+
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
+{
+ struct qed_mcp_mb_params mb_params;
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.param = param;
+ mb_params.p_data_dst = raw_data;
+
+ /* Use the maximal value since the actual one is part of the response */
+ mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+ if (b_can_sleep)
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ *o_mcp_resp = mb_params.mcp_resp;
+ *o_mcp_param = mb_params.mcp_param;
+
+ *o_txn_size = *o_mcp_param;
+ memcpy(o_buf, raw_data, *o_txn_size);
+
+ return 0;
+}
+
+static bool
+qed_mcp_can_force_load(u8 drv_role,
+ u8 exist_drv_role,
+ enum qed_override_force_load override_force_load)
+{
+ bool can_force_load = false;
+
+ switch (override_force_load) {
+ case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
+ can_force_load = true;
+ break;
+ case QED_OVERRIDE_FORCE_LOAD_NEVER:
+ can_force_load = false;
+ break;
+ default:
+ can_force_load = (drv_role == DRV_ROLE_OS &&
+ exist_drv_role == DRV_ROLE_PREBOOT) ||
+ (drv_role == DRV_ROLE_KDUMP &&
+ exist_drv_role == DRV_ROLE_OS);
+ break;
+ }
+
+ return can_force_load;
+}
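+
+/* Summarizing the default policy above: without an override, a force load is
+ * permitted only when an OS driver replaces a preboot driver, or when a kdump
+ * driver replaces an OS driver. QED_OVERRIDE_FORCE_LOAD_ALWAYS and
+ * QED_OVERRIDE_FORCE_LOAD_NEVER bypass this check entirely.
+ */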
+
+static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+ &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to send cancel load request, rc = %d\n", rc);
+
+ return rc;
+}
+
+#define BITMAP_IDX_FOR_CONFIG_QEDE BIT(0)
+#define BITMAP_IDX_FOR_CONFIG_QED_SRIOV BIT(1)
+#define BITMAP_IDX_FOR_CONFIG_QEDR BIT(2)
+#define BITMAP_IDX_FOR_CONFIG_QEDF BIT(4)
+#define BITMAP_IDX_FOR_CONFIG_QEDI BIT(5)
+#define BITMAP_IDX_FOR_CONFIG_QED_LL2 BIT(6)
+
+static u32 qed_get_config_bitmap(void)
+{
+ u32 config_bitmap = 0x0;
+
+ if (IS_ENABLED(CONFIG_QEDE))
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE;
+
+ if (IS_ENABLED(CONFIG_QED_SRIOV))
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV;
+
+ if (IS_ENABLED(CONFIG_QED_RDMA))
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR;
+
+ if (IS_ENABLED(CONFIG_QED_FCOE))
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF;
+
+ if (IS_ENABLED(CONFIG_QED_ISCSI))
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI;
+
+ if (IS_ENABLED(CONFIG_QED_LL2))
+ config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2;
+
+ return config_bitmap;
+}
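+
+/* Worked example (hypothetical kernel configuration): with only CONFIG_QEDE
+ * and CONFIG_QED_LL2 enabled, the reported bitmap is
+ * BITMAP_IDX_FOR_CONFIG_QEDE | BITMAP_IDX_FOR_CONFIG_QED_LL2 =
+ * BIT(0) | BIT(6) = 0x41. The value is sent to the MFW as drv_ver_1 in the
+ * load request below, letting the management firmware know which qed
+ * sub-drivers the kernel was built with.
+ */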
+
+struct qed_load_req_in_params {
+ u8 hsi_ver;
+#define QED_LOAD_REQ_HSI_VER_DEFAULT 0
+#define QED_LOAD_REQ_HSI_VER_1 1
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u8 drv_role;
+ u8 timeout_val;
+ u8 force_cmd;
+ bool avoid_eng_reset;
+};
+
+struct qed_load_req_out_params {
+ u32 load_code;
+ u32 exist_drv_ver_0;
+ u32 exist_drv_ver_1;
+ u32 exist_fw_ver;
+ u8 exist_drv_role;
+ u8 mfw_hsi_ver;
+ bool drv_exists;
+};
+
+static int
+__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_in_params *p_in_params,
+ struct qed_load_req_out_params *p_out_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ u32 hsi_ver;
+ int rc;
+
+ memset(&load_req, 0, sizeof(load_req));
+ load_req.drv_ver_0 = p_in_params->drv_ver_0;
+ load_req.drv_ver_1 = p_in_params->drv_ver_1;
+ load_req.fw_ver = p_in_params->fw_ver;
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
+ p_in_params->timeout_val);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
+ p_in_params->force_cmd);
+ QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
+ p_in_params->avoid_eng_reset);
+
+ hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
+ DRV_ID_MCP_HSI_VER_CURRENT :
+ (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+ mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
+ mb_params.p_data_src = &load_req;
+ mb_params.data_src_size = sizeof(load_req);
+ mb_params.p_data_dst = &load_rsp;
+ mb_params.data_dst_size = sizeof(load_rsp);
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+ mb_params.param,
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+ QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+ load_req.drv_ver_0,
+ load_req.drv_ver_1,
+ load_req.fw_ver,
+ load_req.misc0,
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
+ QED_MFW_GET_FIELD(load_req.misc0,
+ LOAD_REQ_LOCK_TO),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
+ QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
+ }
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
+ return rc;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+ p_out_params->load_code = mb_params.mcp_resp;
+
+ if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+ load_rsp.drv_ver_0,
+ load_rsp.drv_ver_1,
+ load_rsp.fw_ver,
+ load_rsp.misc0,
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));
+
+ p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
+ p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
+ p_out_params->exist_fw_ver = load_rsp.fw_ver;
+ p_out_params->exist_drv_role =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
+ p_out_params->mfw_hsi_ver =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
+ p_out_params->drv_exists =
+ QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
+ LOAD_RSP_FLAGS0_DRV_EXISTS;
+ }
+
+ return 0;
+}
+
+static int qed_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
+ enum qed_drv_role drv_role,
+ u8 *p_mfw_drv_role)
+{
+ switch (drv_role) {
+ case QED_DRV_ROLE_OS:
+ *p_mfw_drv_role = DRV_ROLE_OS;
+ break;
+ case QED_DRV_ROLE_KDUMP:
+ *p_mfw_drv_role = DRV_ROLE_KDUMP;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_load_req_force {
+ QED_LOAD_REQ_FORCE_NONE,
+ QED_LOAD_REQ_FORCE_PF,
+ QED_LOAD_REQ_FORCE_ALL,
+};
+
+static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
+ enum qed_load_req_force force_cmd,
+ u8 *p_mfw_force_cmd)
+{
+ switch (force_cmd) {
+ case QED_LOAD_REQ_FORCE_NONE:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+ break;
+ case QED_LOAD_REQ_FORCE_PF:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+ break;
+ case QED_LOAD_REQ_FORCE_ALL:
+ *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+ break;
+ }
+}
+
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_params *p_params)
+{
+ struct qed_load_req_out_params out_params;
+ struct qed_load_req_in_params in_params;
+ u8 mfw_drv_role, mfw_force_cmd;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
+ in_params.drv_ver_1 = qed_get_config_bitmap();
+ in_params.fw_ver = STORM_FW_VERSION;
+ rc = qed_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+ if (rc)
+ return rc;
+
+ in_params.drv_role = mfw_drv_role;
+ in_params.timeout_val = p_params->timeout_val;
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ in_params.avoid_eng_reset = p_params->avoid_eng_reset;
+
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+
+ /* First handle cases where another load request should/might be sent:
+ * - MFW expects the old interface [HSI version = 1]
+ * - MFW responds that a force load request is required
+ */
+ if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+ DP_INFO(p_hwfn,
+ "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
+
+ in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+ if (rc)
+ return rc;
+ } else if (out_params.load_code ==
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+ if (qed_mcp_can_force_load(in_params.drv_role,
+ out_params.exist_drv_role,
+ p_params->override_force_load)) {
+ DP_INFO(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+
+ qed_get_mfw_force_cmd(p_hwfn,
+ QED_LOAD_REQ_FORCE_ALL,
+ &mfw_force_cmd);
+
+ in_params.force_cmd = mfw_force_cmd;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+ } else {
+ DP_NOTICE(p_hwfn,
+ "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
+ in_params.drv_role, in_params.fw_ver,
+ in_params.drv_ver_0, in_params.drv_ver_1,
+ out_params.exist_drv_role,
+ out_params.exist_fw_ver,
+ out_params.exist_drv_ver_0,
+ out_params.exist_drv_ver_1);
+ DP_NOTICE(p_hwfn,
+ "Avoid sending a force load request to prevent disruption of active PFs\n");
+
+ qed_mcp_cancel_load_req(p_hwfn, p_ptt);
+ return -EBUSY;
+ }
+ }
+
+ /* Now handle the other types of responses.
+ * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+ * expected here after the additional revised load requests were sent.
+ */
+ switch (out_params.load_code) {
+ case FW_MSG_CODE_DRV_LOAD_ENGINE:
+ case FW_MSG_CODE_DRV_LOAD_PORT:
+ case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+ if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
+ out_params.drv_exists) {
+ /* The role and fw/driver version match, but the PF is
+ * already loaded and has not been unloaded gracefully.
+ */
+ DP_NOTICE(p_hwfn, "PF is already loaded\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
+ out_params.load_code);
+ return -EBUSY;
+ }
+
+ p_params->load_code = out_params.load_code;
+
+ return 0;
+}
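+
+/* The load-request flow above, in short: send LOAD_REQ with the current HSI;
+ * if the MFW answers REFUSED_HSI_1, retry once with the legacy HSI; if it
+ * answers REFUSED_REQUIRES_FORCE, either resend with LOAD_REQ_FORCE_ALL (when
+ * qed_mcp_can_force_load() permits it) or cancel the request and fail with
+ * -EBUSY. Any remaining response other than LOAD_ENGINE/PORT/FUNCTION is
+ * treated as a refusal.
+ */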
+
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
+ &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a LOAD_DONE command, rc = %d\n", rc);
+ return rc;
+ }
+
+ /* Check if there is a DID mismatch between nvm-cfg/efuse */
+ if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+ DP_NOTICE(p_hwfn,
+ "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
+ return 0;
+}
+
+#define MFW_COMPLETION_MAX_ITER 5000
+#define MFW_COMPLETION_INTERVAL_MS 1
+
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params;
+ u32 cnt = MFW_COMPLETION_MAX_ITER;
+ u32 wol_param;
+ int rc;
+
+ switch (p_hwfn->cdev->wol_config) {
+ case QED_OV_WOL_DISABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown WoL configuration %02x\n",
+ p_hwfn->cdev->wol_config);
+ fallthrough;
+ case QED_OV_WOL_DEFAULT:
+ wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
+ mb_params.param = wol_param;
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;
+
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ set_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ while (test_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status) && --cnt)
+ msleep(MFW_COMPLETION_INTERVAL_MS);
+
+ if (!cnt)
+ DP_NOTICE(p_hwfn,
+ "Failed to wait MFW event completion after %d msec\n",
+ MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS);
+
+ return rc;
+}
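+
+/* A note on the completion wait above: after UNLOAD_REQ is sent, the function
+ * waits for any MFW event still being processed to drain, polling
+ * QED_MCP_IN_PROCESSING_BIT up to MFW_COMPLETION_MAX_ITER (5000) times at
+ * MFW_COMPLETION_INTERVAL_MS (1) msec intervals, i.e. for up to ~5 seconds,
+ * while QED_MCP_BYPASS_PROC_BIT keeps new MFW events from being handled.
+ */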
+
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct mcp_mac wol_mac;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;
+
+ /* Set the primary MAC if WoL is enabled */
+ if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
+ u8 *p_mac = p_hwfn->cdev->wol_mac;
+
+ memset(&wol_mac, 0, sizeof(wol_mac));
+ wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
+ wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
+ p_mac[4] << 8 | p_mac[5];
+
+ DP_VERBOSE(p_hwfn,
+ (QED_MSG_SP | NETIF_MSG_IFDOWN),
+ "Setting WoL MAC: %pM --> [%08x,%08x]\n",
+ p_mac, wol_mac.mac_upper, wol_mac.mac_lower);
+
+ mb_params.p_data_src = &wol_mac;
+ mb_params.data_src_size = sizeof(wol_mac);
+ }
+
+ return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ QED_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ mb_params.p_data_src = vfs_to_ack;
+ mb_params.data_src_size = VF_MAX_STATIC / 8;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+ return -EBUSY;
+ }
+
+ /* Clear the ACK bits */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ qed_wr(p_hwfn, p_ptt,
+ func_addr +
+ offsetof(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
+static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 transceiver_state;
+
+ transceiver_state = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ DP_VERBOSE(p_hwfn,
+ (NETIF_MSG_HW | QED_MSG_SP),
+ "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
+ transceiver_state,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, transceiver_data)));
+
+ transceiver_state = GET_FIELD(transceiver_state,
+ ETH_TRANSCEIVER_STATE);
+
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ DP_NOTICE(p_hwfn, "Transceiver is present.\n");
+ else
+ DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
+}
+
+static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link)
+{
+ u32 eee_status, val;
+
+ p_link->eee_adv_caps = 0;
+ p_link->eee_lp_adv_caps = 0;
+ eee_status = qed_rd(p_hwfn,
+ p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, eee_status));
+ p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
+ val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_adv_caps |= QED_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_adv_caps |= QED_EEE_10G_ADV;
+ val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
+ if (val & EEE_1G_ADV)
+ p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
+ if (val & EEE_10G_ADV)
+ p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data, int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr;
+ u32 i, size;
+
+ func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MIN_BW);
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
+ FUNC_MF_CFG_MAX_BW);
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
+static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool b_reset)
+{
+ struct qed_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
+ u32 status = 0;
+
+ /* Prevent SW/attentions from doing this at the same time */
+ spin_lock_bh(&p_hwfn->mcp_info->link_lock);
+
+ p_link = &p_hwfn->mcp_info->link_output;
+ memset(p_link, 0, sizeof(*p_link));
+ if (!b_reset) {
+ status = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, link_status));
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
+ "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
+ status,
+ (u32)(p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, link_status)));
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Resetting link indications\n");
+ goto out;
+ }
+
+ if (p_hwfn->b_drv_link_init) {
+ /* Link indication with modern MFW arrives as per-PF
+ * indication.
+ */
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+ p_link->link_up = !!(shmem_info.status &
+ FUNC_STATUS_VIRTUAL_LINK_UP);
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Virtual link_up = %d\n", p_link->link_up);
+ } else {
+ p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Physical link_up = %d\n", p_link->link_up);
+ }
+ } else {
+ p_link->link_up = false;
+ }
+
+ p_link->full_duplex = true;
+ switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
+ case LINK_STATUS_SPEED_AND_DUPLEX_100G:
+ p_link->speed = 100000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_50G:
+ p_link->speed = 50000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_40G:
+ p_link->speed = 40000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_25G:
+ p_link->speed = 25000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_20G:
+ p_link->speed = 20000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_10G:
+ p_link->speed = 10000;
+ break;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
+ p_link->full_duplex = false;
+ fallthrough;
+ case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
+ p_link->speed = 1000;
+ break;
+ default:
+ p_link->speed = 0;
+ p_link->link_up = 0;
+ }
+
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
+ p_link->min_pf_rate);
+
+ p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
+ p_link->an_complete = !!(status &
+ LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
+ p_link->parallel_detection = !!(status &
+ LINK_STATUS_PARALLEL_DETECTION_USED);
+ p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);
+
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_FD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_1G_HD : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_10G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_20G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_25G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_40G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_50G : 0;
+ p_link->partner_adv_speed |=
+ (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
+ QED_LINK_PARTNER_SPEED_100G : 0;
+
+ p_link->partner_tx_flow_ctrl_en =
+ !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
+ p_link->partner_rx_flow_ctrl_en =
+ !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);
+
+ switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
+ case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
+ break;
+ case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
+ p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
+ break;
+ default:
+ p_link->partner_adv_pause = 0;
+ }
+
+ p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);
+
+ if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
+ qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);
+
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+ switch (status & LINK_STATUS_FEC_MODE_MASK) {
+ case LINK_STATUS_FEC_MODE_NONE:
+ p_link->fec_active = QED_FEC_MODE_NONE;
+ break;
+ case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
+ p_link->fec_active = QED_FEC_MODE_FIRECODE;
+ break;
+ case LINK_STATUS_FEC_MODE_RS_CL91:
+ p_link->fec_active = QED_FEC_MODE_RS;
+ break;
+ default:
+ p_link->fec_active = QED_FEC_MODE_AUTO;
+ }
+ } else {
+ p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
+ }
+
+ qed_link_update(p_hwfn, p_ptt);
+out:
+ spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
+}
+
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
+{
+ struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
+ struct qed_mcp_mb_params mb_params;
+ struct eth_phy_cfg phy_cfg;
+ u32 cmd, fec_bit = 0;
+ u32 val, ext_speed;
+ int rc = 0;
+
+ /* Set the shmem configuration according to params */
+ memset(&phy_cfg, 0, sizeof(phy_cfg));
+ cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
+ if (!params->speed.autoneg)
+ phy_cfg.speed = params->speed.forced_speed;
+ phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
+ phy_cfg.adv_speed = params->speed.advertised_speeds;
+ phy_cfg.loopback_mode = params->loopback_mode;
+
+ /* There are MFWs that share this capability regardless of whether
+ * this is feasible or not. And given that at the very least adv_caps
+ * would be set internally by qed, we want to make sure LFA would
+ * still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+ if (params->eee.tx_lpi_enable)
+ phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
+ if (params->eee.adv_caps & QED_EEE_1G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
+ if (params->eee.adv_caps & QED_EEE_10G_ADV)
+ phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
+ phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
+ EEE_TX_TIMER_USEC_OFFSET) &
+ EEE_TX_TIMER_USEC_MASK;
+ }
+
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
+ if (params->fec & QED_FEC_MODE_NONE)
+ fec_bit |= FEC_FORCE_MODE_NONE;
+ else if (params->fec & QED_FEC_MODE_FIRECODE)
+ fec_bit |= FEC_FORCE_MODE_FIRECODE;
+ else if (params->fec & QED_FEC_MODE_RS)
+ fec_bit |= FEC_FORCE_MODE_RS;
+ else if (params->fec & QED_FEC_MODE_AUTO)
+ fec_bit |= FEC_FORCE_MODE_AUTO;
+
+ SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
+ }
+
+ if (p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
+ ext_speed = 0;
+ if (params->ext_speed.autoneg)
+ ext_speed |= ETH_EXT_SPEED_NONE;
+
+ val = params->ext_speed.forced_speed;
+ if (val & QED_EXT_SPEED_1G)
+ ext_speed |= ETH_EXT_SPEED_1G;
+ if (val & QED_EXT_SPEED_10G)
+ ext_speed |= ETH_EXT_SPEED_10G;
+ if (val & QED_EXT_SPEED_25G)
+ ext_speed |= ETH_EXT_SPEED_25G;
+ if (val & QED_EXT_SPEED_40G)
+ ext_speed |= ETH_EXT_SPEED_40G;
+ if (val & QED_EXT_SPEED_50G_R)
+ ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
+ if (val & QED_EXT_SPEED_50G_R2)
+ ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
+ if (val & QED_EXT_SPEED_100G_R2)
+ ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
+ if (val & QED_EXT_SPEED_100G_R4)
+ ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
+ if (val & QED_EXT_SPEED_100G_P4)
+ ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;
+
+ SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
+ ext_speed);
+
+ ext_speed = 0;
+
+ val = params->ext_speed.advertised_speeds;
+ if (val & QED_EXT_SPEED_MASK_1G)
+ ext_speed |= ETH_EXT_ADV_SPEED_1G;
+ if (val & QED_EXT_SPEED_MASK_10G)
+ ext_speed |= ETH_EXT_ADV_SPEED_10G;
+ if (val & QED_EXT_SPEED_MASK_25G)
+ ext_speed |= ETH_EXT_ADV_SPEED_25G;
+ if (val & QED_EXT_SPEED_MASK_40G)
+ ext_speed |= ETH_EXT_ADV_SPEED_40G;
+ if (val & QED_EXT_SPEED_MASK_50G_R)
+ ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
+ if (val & QED_EXT_SPEED_MASK_50G_R2)
+ ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
+ if (val & QED_EXT_SPEED_MASK_100G_R2)
+ ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
+ if (val & QED_EXT_SPEED_MASK_100G_R4)
+ ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
+ if (val & QED_EXT_SPEED_MASK_100G_P4)
+ ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;
+
+ phy_cfg.extended_speed |= ext_speed;
+
+ SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
+ params->ext_fec_mode);
+ }
+
+ p_hwfn->b_drv_link_init = b_up;
+
+ if (b_up) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
+ phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
+ phy_cfg.loopback_mode, phy_cfg.fec_mode,
+ phy_cfg.extended_speed);
+ } else {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = cmd;
+ mb_params.p_data_src = &phy_cfg;
+ mb_params.data_src_size = sizeof(phy_cfg);
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+
+ /* if mcp fails to respond we must abort */
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ /* Mimic link-change attention, done for several reasons:
+ * - On reset, there's no guarantee MFW would trigger
+ * an attention.
+ * - On initialization, older MFWs might not indicate link change
+ * during LFA, so we'll never get an UP indication.
+ */
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);
+
+ return 0;
+}
+
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
+ path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));
+
+ proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path, process_kill)) &
+ PROCESS_KILL_COUNTER_MASK;
+
+ return proc_kill_cnt;
+}
+
+static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 proc_kill_cnt;
+
+ /* Prevent possible attentions/interrupts during the recovery handling
+ * and till its load phase, during which they will be re-enabled.
+ */
+ qed_int_igu_disable_int(p_hwfn, p_ptt);
+
+ DP_NOTICE(p_hwfn, "Received a process kill indication\n");
+
+ /* The following operations should be done once, and thus in CMT mode
+ * are carried out by only the first HW function.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(cdev))
+ return;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Ignoring the indication since a recovery process is already in progress\n");
+ return;
+ }
+
+ cdev->recov_in_prog = true;
+
+ proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
+ DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);
+
+ qed_schedule_recovery_handler(p_hwfn);
+}
+
+static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum MFW_DRV_MSG_TYPE type)
+{
+ enum qed_mcp_protocol_type stats_type;
+ union qed_mcp_protocol_stats stats;
+ struct qed_mcp_mb_params mb_params;
+ u32 hsi_param;
+
+ switch (type) {
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ stats_type = QED_MCP_LAN_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
+ break;
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ stats_type = QED_MCP_FCOE_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
+ break;
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ stats_type = QED_MCP_ISCSI_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
+ break;
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ stats_type = QED_MCP_RDMA_STATS;
+ hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
+ return;
+ }
+
+ qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_GET_STATS;
+ mb_params.param = hsi_param;
+ mb_params.p_data_src = &stats;
+ mb_params.data_src_size = sizeof(stats);
+ qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+ qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
+static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+
+ p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
+ FUNC_MF_CFG_OV_STAG_MASK;
+ p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
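+ /* When oVLAN classification is enabled, program the new S-tag into the
+ * NIG and DORQ registers, or clear them if the S-tag is unset.
+ */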
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
+ p_hwfn->hw_info.ovlan);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
+
+ /* Configure DB to add external vlan to EDPM packets */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
+ p_hwfn->hw_info.ovlan);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
+ }
+
+ qed_sp_pf_update_stag(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
+ p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
+ &resp, &param);
+}
+
+static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ /* A single notification should be sent to the upper driver in CMT mode */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
+ "Fan failure was detected on the network interface card and it's going to be shut down.\n");
+}
+
+struct qed_mdump_cmd_params {
+ u32 cmd;
+ void *p_data_src;
+ u8 data_src_size;
+ void *p_data_dst;
+ u8 data_dst_size;
+ u32 mcp_resp;
+};
+
+static int
+qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mdump_cmd_params *p_mdump_cmd_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
+ mb_params.param = p_mdump_cmd_params->cmd;
+ mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
+ mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
+ mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
+ mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;
+
+ if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
+ DP_INFO(p_hwfn,
+ "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
+ p_mdump_cmd_params->cmd);
+ rc = -EOPNOTSUPP;
+ } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The mdump command is not supported by the MFW\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;
+
+ return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+}
+
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain)
+{
+ struct qed_mdump_cmd_params mdump_cmd_params;
+ int rc;
+
+ memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
+ mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN;
+ mdump_cmd_params.p_data_dst = p_mdump_retain;
+ mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain);
+
+ rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
+ if (rc)
+ return rc;
+
+ if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) {
+ DP_INFO(p_hwfn,
+ "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
+ mdump_cmd_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct mdump_retain_data_stc mdump_retain;
+ int rc;
+
+ /* In CMT mode - no need for more than a single acknowledgment to the
+ * MFW, and no more than a single notification to the upper driver.
+ */
+ if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
+ return;
+
+ rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
+ if (rc == 0 && mdump_retain.valid)
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
+ mdump_retain.epoch,
+ mdump_retain.pf, mdump_retain.status);
+ else
+ DP_NOTICE(p_hwfn,
+ "The MFW notified that a critical error occurred in the device\n");
+
+ DP_NOTICE(p_hwfn,
+ "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
+ qed_mcp_mdump_ack(p_hwfn, p_ptt);
+
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
+}
+
+void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct public_func shmem_info;
+ u32 port_cfg, val;
+
+ if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ return;
+
+ memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
+ port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, oem_cfg_port));
+ val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
+ OEM_CFG_CHANNEL_TYPE_OFFSET;
+ if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
+ DP_NOTICE(p_hwfn,
+ "Incorrect UFP Channel type %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
+
+ val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
+ if (val == OEM_CFG_SCHED_TYPE_ETS) {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
+ } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
+ } else {
+ p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
+ DP_NOTICE(p_hwfn,
+ "Unknown UFP scheduling mode %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
+ }
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
+ OEM_CFG_FUNC_TC_OFFSET;
+ p_hwfn->ufp_info.tc = (u8)val;
+ val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
+ OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
+ if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
+ } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
+ } else {
+ p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
+ DP_NOTICE(p_hwfn,
+ "Unknown Host priority control %d port_id 0x%02x\n",
+ val, MFW_PORT(p_hwfn));
+ }
+
+ DP_NOTICE(p_hwfn,
+ "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
+ p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
+ p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
+}
+
+static int
+qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ qed_mcp_read_ufp_config(p_hwfn, p_ptt);
+
+ if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
+ p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
+ qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
+ p_hwfn->ufp_info.tc);
+
+ qed_qm_reconf(p_hwfn, p_ptt);
+ } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
+ /* Merge UFP TC with the dcbx TC data */
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ } else {
+ DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
+ return -EINVAL;
+ }
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update_ufp(p_hwfn);
+
+ /* update stag pcp value */
+ qed_sp_pf_update_stag(p_hwfn);
+
+ return 0;
+}
+
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_info *info = p_hwfn->mcp_info;
+ int rc = 0;
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");
+
+ /* Read Messages from MFW */
+ qed_mcp_read_mb(p_hwfn, p_ptt);
+
+ /* Compare current messages to old ones */
+ for (i = 0; i < info->mfw_mb_length; i++) {
+ if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
+ continue;
+
+ found = true;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
+ i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);
+
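+ /* Under the unload lock, either skip the message if MFW processing
+ * is bypassed (unload flow) or mark it as being processed.
+ */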
+ spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
+ if (test_bit(QED_MCP_BYPASS_PROC_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status)) {
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+ DP_INFO(p_hwfn,
+ "Msg [%d] is bypassed on unload flow\n", i);
+ continue;
+ }
+
+ set_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
+
+ switch (i) {
+ case MFW_DRV_MSG_LINK_CHANGE:
+ qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
+ break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ break;
+ case MFW_DRV_MSG_OEM_CFG_UPDATE:
+ qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
+ qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_ERROR_RECOVERY:
+ qed_mcp_handle_process_kill(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_GET_LAN_STATS:
+ case MFW_DRV_MSG_GET_FCOE_STATS:
+ case MFW_DRV_MSG_GET_ISCSI_STATS:
+ case MFW_DRV_MSG_GET_RDMA_STATS:
+ qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
+ break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ qed_mcp_update_bw(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_S_TAG_UPDATE:
+ qed_mcp_update_stag(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_FAILURE_DETECTED:
+ qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
+ qed_mcp_handle_critical_error(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_GET_TLV_REQ:
+ qed_mfw_tlv_req(p_hwfn);
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
+ rc = -EINVAL;
+ }
+
+ clear_bit(QED_MCP_IN_PROCESSING_BIT,
+ &p_hwfn->mcp_info->mcp_handling_status);
+ }
+
+ /* ACK everything */
+ for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
+ __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);
+
+ /* The MFW expects the answer in BE, so force the write in that format */
+ qed_wr(p_hwfn, p_ptt,
+ info->mfw_mb_addr + sizeof(u32) +
+ MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
+ sizeof(u32) + i * sizeof(u32),
+ (__force u32)val);
+ }
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Received an MFW message indication but no new message!\n");
+ rc = -EINVAL;
+ }
+
+ /* Copy the new mfw messages into the shadow */
+ memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);
+
+ return rc;
+}
+
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id)
+{
+ u32 global_offsize, public_base;
+
+ if (IS_VF(p_hwfn->cdev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return 0;
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return -EINVAL;
+ }
+ }
+
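+ /* Read the MFW version (and optionally the running bundle id) from
+ * the global public SHMEM section.
+ */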
+ public_base = p_hwfn->mcp_info->public_base;
+ global_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(public_base,
+ PUBLIC_GLOBAL));
+ *p_mfw_ver =
+ qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize,
+ 0) + offsetof(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id) {
+ *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global,
+ running_bundle_id));
+ }
+
+ return 0;
+}
+
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ /* Read the address of the nvm_cfg */
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ if (!nvm_cfg_addr) {
+ DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+ return -EINVAL;
+ }
+
+ /* Read the offset of nvm_cfg1 */
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
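+ /* The MBI version resides in the global section of nvm_cfg1, which is
+ * located in the MCP scratchpad.
+ */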
+ mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, mbi_version);
+ *p_mbi_ver = qed_rd(p_hwfn, p_ptt,
+ mbi_ver_addr) &
+ (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
+ NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
+
+ return 0;
+}
+
+int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_media_type)
+{
+ *p_media_type = MEDIA_UNSPECIFIED;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ if (!p_ptt) {
+ *p_media_type = MEDIA_UNSPECIFIED;
+ return -EINVAL;
+ }
+
+ *p_media_type = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ media_type));
+
+ return 0;
+}
+
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+ u32 *p_transceiver_type)
+{
+ u32 transceiver_info;
+
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
+ *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+
+ transceiver_info = qed_rd(p_hwfn, p_ptt,
+ p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port,
+ transceiver_data));
+
+ *p_transceiver_state = (transceiver_info &
+ ETH_TRANSCEIVER_STATE_MASK) >>
+ ETH_TRANSCEIVER_STATE_OFFSET;
+
+ if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
+ *p_transceiver_type = (transceiver_info &
+ ETH_TRANSCEIVER_TYPE_MASK) >>
+ ETH_TRANSCEIVER_TYPE_OFFSET;
+ else
+ *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;
+
+ return 0;
+}
+
+static bool qed_is_transceiver_ready(u32 transceiver_state,
+ u32 transceiver_type)
+{
+ if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
+ ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
+ (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
+ return true;
+
+ return false;
+}
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask)
+{
+ u32 transceiver_type, transceiver_state;
+ int ret;
+
+ ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+ if (ret)
+ return ret;
+
+ if (!qed_is_transceiver_ready(transceiver_state, transceiver_type))
+ return -EINVAL;
+
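+ /* Map the transceiver type to the speed capabilities it supports */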
+ switch (transceiver_type) {
+ case ETH_TRANSCEIVER_TYPE_1G_LX:
+ case ETH_TRANSCEIVER_TYPE_1G_SX:
+ case ETH_TRANSCEIVER_TYPE_1G_PCC:
+ case ETH_TRANSCEIVER_TYPE_1G_ACC:
+ case ETH_TRANSCEIVER_TYPE_1000BASET:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_10G_LR:
+ case ETH_TRANSCEIVER_TYPE_10G_LRM:
+ case ETH_TRANSCEIVER_TYPE_10G_ER:
+ case ETH_TRANSCEIVER_TYPE_10G_PCC:
+ case ETH_TRANSCEIVER_TYPE_10G_ACC:
+ case ETH_TRANSCEIVER_TYPE_4x10G:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_LR4:
+ case ETH_TRANSCEIVER_TYPE_40G_SR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_AOC:
+ case ETH_TRANSCEIVER_TYPE_100G_SR4:
+ case ETH_TRANSCEIVER_TYPE_100G_LR4:
+ case ETH_TRANSCEIVER_TYPE_100G_ER4:
+ case ETH_TRANSCEIVER_TYPE_100G_ACC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_25G_LR:
+ case ETH_TRANSCEIVER_TYPE_25G_AOC:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
+ case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_25G_CA_N:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_S:
+ case ETH_TRANSCEIVER_TYPE_25G_CA_L:
+ case ETH_TRANSCEIVER_TYPE_4x25G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_40G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_100G_CR4:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
+ *p_speed_mask =
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_XLPPI:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
+ break;
+ case ETH_TRANSCEIVER_TYPE_10G_BASET:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
+ case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
+ *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
+ transceiver_type);
+ *p_speed_mask = 0xff;
+ break;
+ }
+
+ return 0;
+}
+
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config)
+{
+ u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ if (!qed_mcp_is_init(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
+ return -EBUSY;
+ }
+ if (!p_ptt) {
+ *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
+ return -EINVAL;
+ }
+
+ nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+ port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
+ *p_board_config = qed_rd(p_hwfn, p_ptt,
+ port_cfg_addr +
+ offsetof(struct nvm_cfg1_port,
+ board_cfg));
+
+ return 0;
+}
+
+/* Old MFW has a global configuration for all PFs regarding RDMA support */
+static void
+qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
+ enum qed_pci_personality *p_proto)
+{
+ /* There wasn't ever a legacy MFW that published iwarp.
+ * So at this point, this is either plain l2 or RoCE.
+ */
+ if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
+ *p_proto = QED_PCI_ETH_ROCE;
+ else
+ *p_proto = QED_PCI_ETH;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "According to Legacy capabilities, L2 personality is %08x\n",
+ (u32)*p_proto);
+}
+
+static int
+qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality *p_proto)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
+ if (rc)
+ return rc;
+ if (resp != FW_MSG_CODE_OK) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "MFW lacks support for command; Returns %08x\n",
+ resp);
+ return -EINVAL;
+ }
+
+ switch (param) {
+ case FW_MB_PARAM_GET_PF_RDMA_NONE:
+ *p_proto = QED_PCI_ETH;
+ break;
+ case FW_MB_PARAM_GET_PF_RDMA_ROCE:
+ *p_proto = QED_PCI_ETH_ROCE;
+ break;
+ case FW_MB_PARAM_GET_PF_RDMA_IWARP:
+ *p_proto = QED_PCI_ETH_IWARP;
+ break;
+ case FW_MB_PARAM_GET_PF_RDMA_BOTH:
+ *p_proto = QED_PCI_ETH_RDMA;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
+ param);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_IFUP,
+ "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
+ (u32)*p_proto, resp, param);
+ return 0;
+}
+
+static int
+qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
+ struct public_func *p_info,
+ struct qed_ptt *p_ptt,
+ enum qed_pci_personality *p_proto)
+{
+ int rc = 0;
+
+ switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
+ case FUNC_MF_CFG_PROTOCOL_ETHERNET:
+ if (!IS_ENABLED(CONFIG_QED_RDMA))
+ *p_proto = QED_PCI_ETH;
+ else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
+ qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ISCSI:
+ *p_proto = QED_PCI_ISCSI;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_FCOE:
+ *p_proto = QED_PCI_FCOE;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ROCE:
+ DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
+ fallthrough;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *info;
+ struct public_func shmem_info;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
+ info = &p_hwfn->mcp_info->func_info;
+
+ info->pause_on_host = (shmem_info.config &
+ FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;
+
+ if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
+ &info->protocol)) {
+ DP_ERR(p_hwfn, "Unknown personality %08x\n",
+ (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
+ return -EINVAL;
+ }
+
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ if (shmem_info.mac_upper || shmem_info.mac_lower) {
+ info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
+ info->mac[1] = (u8)(shmem_info.mac_upper);
+ info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
+ info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
+ info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
+ info->mac[5] = (u8)(shmem_info.mac_lower);
+
+ /* Store primary MAC for later possible WoL */
+ memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
+ } else {
+ DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
+ }
+
+ info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
+ (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
+ info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
+ (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);
+
+ info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);
+
+ info->mtu = (u16)shmem_info.mtu_size;
+
+ p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
+ p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
+ if (qed_mcp_is_init(p_hwfn)) {
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
+ if (rc)
+ return rc;
+ if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
+ p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
+ }
+
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
+ "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
+ info->pause_on_host, info->protocol,
+ info->bandwidth_min, info->bandwidth_max,
+ info->mac,
+ info->wwn_port, info->wwn_node,
+ info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);
+
+ return 0;
+}
+
+struct qed_mcp_link_params
+*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_input;
+}
+
+struct qed_mcp_link_state
+*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_output;
+}
+
+struct qed_mcp_link_capabilities
+*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn || !p_hwfn->mcp_info)
+ return NULL;
+ return &p_hwfn->mcp_info->link_capabilities;
+}
+
+int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);
+
+ /* Wait for the drain to complete before returning */
+ msleep(1020);
+
+ return rc;
+}
+
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_flash_size)
+{
+ u32 flash_size;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
+ flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
+ MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
+ flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
+
+ *p_flash_size = flash_size;
+
+ return 0;
+}
+
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+
+ if (cdev->recov_in_prog) {
+ DP_NOTICE(p_hwfn,
+ "Avoid triggering a recovery since such a process is already in progress\n");
+ return -EAGAIN;
+ }
+
+ DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
+ qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
+
+ return 0;
+}
+
+#define QED_RECOVERY_PROLOG_SLEEP_MS 100
+
+int qed_recovery_prolog(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ int rc;
+
+ /* Allow ongoing PCIe transactions to complete */
+ msleep(QED_RECOVERY_PROLOG_SLEEP_MS);
+
+ /* Clear the PF's internal FID_enable in the PXP */
+ rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
+ rc);
+
+ return rc;
+}
+
+static int
+qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ int rc;
+
+ /* Only the leading hwfn can configure MSI-X, and CMT must be taken into account */
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return 0;
+ num *= p_hwfn->cdev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
+
+static int
+qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 num)
+{
+ u32 resp = 0, param = num, rc_param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
+ param, &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts for VFs\n", num);
+ }
+
+ return rc;
+}
+
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ if (QED_IS_BB(p_hwfn->cdev))
+ return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
+ else
+ return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
+}
+
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct drv_version_stc drv_version;
+ __be32 val;
+ u32 i;
+ int rc;
+
+ memset(&drv_version, 0, sizeof(drv_version));
+ drv_version.version = p_ver->version;
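+ /* The MCP is BE, so copy the version string as big-endian dwords */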
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+ val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
+ *(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
+ mb_params.p_data_src = &drv_version;
+ mb_params.data_src_size = sizeof(drv_version);
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+ /* Wait up to 100 msec for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS 10
+#define QED_MCP_HALT_MAX_RETRIES 10
+
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
+ &param);
+ if (rc) {
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
+
+ do {
+ msleep(QED_MCP_HALT_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return 0;
+}
+
+#define QED_MCP_RESUME_SLEEP_MS 10
+
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state;
+
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+ msleep(QED_MCP_RESUME_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+ return 0;
+}
+
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_client client)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (client) {
+ case QED_OV_CLIENT_DRV:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+ break;
+ case QED_OV_CLIENT_USER:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+ break;
+ case QED_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_driver_state drv_state)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (drv_state) {
+ case QED_OV_DRIVER_STATE_NOT_LOADED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+ break;
+ case QED_OV_DRIVER_STATE_DISABLED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+ break;
+ case QED_OV_DRIVER_STATE_ACTIVE:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
+
+ return rc;
+}
+
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 mtu)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const u8 *mac)
+{
+ struct qed_mcp_mb_params mb_params;
+ u32 mfw_mac[2];
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+ mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
+ DRV_MSG_CODE_VMAC_TYPE_SHIFT;
+ mb_params.param |= MCP_PF_ID(p_hwfn);
+
+ /* The MCP is BE, and on LE platforms PCI swaps accesses to SHMEM
+ * at 32-bit granularity. The MAC therefore has to be set in native
+ * order [and not byte order], otherwise it would be read incorrectly
+ * by the MFW after the swap.
+ */
+ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+ mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+ mb_params.p_data_src = (u8 *)mfw_mac;
+ mb_params.data_src_size = 8;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+ /* Store primary MAC for later possible WoL */
+ memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
+
+ return rc;
+}
+
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_ov_wol wol)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Can't change WoL configuration when WoL isn't supported\n");
+ return -EINVAL;
+ }
+
+ switch (wol) {
+ case QED_OV_WOL_DEFAULT:
+ drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
+ break;
+ case QED_OV_WOL_DISABLED:
+ drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
+
+ /* Store the WoL update for a future unload */
+ p_hwfn->cdev->wol_config = (u8)wol;
+
+ return rc;
+}
+
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_eswitch eswitch)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (eswitch) {
+ case QED_OV_ESWITCH_NONE:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+ break;
+ case QED_OV_ESWITCH_VEB:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+ break;
+ case QED_OV_ESWITCH_VEPA:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_led_mode mode)
+{
+ u32 resp = 0, param = 0, drv_mb_param;
+ int rc;
+
+ switch (mode) {
+ case QED_LED_MODE_ON:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+ break;
+ case QED_LED_MODE_OFF:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+ break;
+ case QED_LED_MODE_RESTORE:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+ drv_mb_param, &resp, &param);
+
+ return rc;
+}
+
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not acknowledge mask parity request. Old MFW?\n");
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
+{
+ u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ u32 resp = 0, resp_param = 0;
+ struct qed_ptt *p_ptt;
+ int rc = 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
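+ /* Read in chunks of up to MCP_DRV_NVM_BUF_LEN bytes; the chunk length
+ * is encoded in the upper bits of the offset parameter.
+ */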
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM,
+ addr + offset +
+ (bytes_to_copy <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET),
+ &resp, &resp_param,
+ &read_len,
+ (u32 *)(p_buf + offset), false);
+
+ if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
+ DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
+ break;
+ }
+
+ /* This can be a lengthy process, and the kernel might not be
+ * preemptible. Sleep a bit (roughly once every 4KB) to prevent
+ * CPU hogging.
+ */
+ if (bytes_left % 0x1000 <
+ (bytes_left - read_len) % 0x1000)
+ usleep_range(1000, 2000);
+
+ offset += read_len;
+ bytes_left -= read_len;
+ }
+
+ cdev->mcp_nvm_resp = resp;
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_mcp_nvm_write(struct qed_dev *cdev,
+ u32 cmd, u32 addr, u8 *p_buf, u32 len)
+{
+ u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc = -EINVAL;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ switch (cmd) {
+ case QED_PUT_FILE_BEGIN:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
+ break;
+ case QED_PUT_FILE_DATA:
+ nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
+ break;
+ case QED_NVM_WRITE_NVRAM:
+ nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
+ rc = -EINVAL;
+ goto out;
+ }
+
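+ /* Write in chunks of up to MCP_DRV_NVM_BUF_LEN bytes; except for
+ * PUT_FILE_BEGIN, the chunk length is encoded in the upper bits of the
+ * offset parameter.
+ */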
+ buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
+ while (buf_idx < len) {
+ if (cmd == QED_PUT_FILE_BEGIN)
+ nvm_offset = addr;
+ else
+ nvm_offset = ((buf_size <<
+ DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
+ buf_idx;
+ rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
+ &resp, &param, buf_size,
+ (u32 *)&p_buf[buf_idx]);
+ if (rc) {
+ DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
+ resp = FW_MSG_CODE_ERROR;
+ break;
+ }
+
+ if (resp != FW_MSG_CODE_OK &&
+ resp != FW_MSG_CODE_NVM_OK &&
+ resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
+ DP_NOTICE(cdev,
+ "nvm write failed, resp = 0x%08x\n", resp);
+ rc = -EINVAL;
+ break;
+ }
+
+ /* This can be a lengthy process, and the kernel might not be
+ * preemptible. Sleep a bit (roughly once every 4KB) to prevent
+ * CPU hogging.
+ */
+ if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
+ usleep_range(1000, 2000);
+
+ /* For MBI upgrade, MFW response includes the next buffer offset
+ * to be delivered to MFW.
+ */
+ if (param && cmd == QED_PUT_FILE_DATA) {
+ buf_idx =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ } else {
+ buf_idx += buf_size;
+ buf_size = min_t(u32, (len - buf_idx),
+ MCP_DRV_NVM_BUF_LEN);
+ }
+ }
+
+ cdev->mcp_nvm_resp = resp;
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
+{
+ u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
+ u32 resp, param;
+ int rc;
+
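+ /* Encode the transceiver port and I2C address once; the offset and
+ * size fields are updated per chunk below.
+ */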
+ nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
+ nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;
+
+ addr = offset;
+ offset = 0;
+ bytes_left = len;
+ while (bytes_left > 0) {
+ bytes_to_copy = min_t(u32, bytes_left,
+ MAX_I2C_TRANSACTION_SIZE);
+ nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
+ DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
+ nvm_offset |= ((addr + offset) <<
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
+ nvm_offset |= (bytes_to_copy <<
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
+ DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_TRANSCEIVER_READ,
+ nvm_offset, &resp, &param, &buf_size,
+ (u32 *)(p_buf + offset), true);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send a transceiver read command to the MFW. rc = %d.\n",
+ rc);
+ return rc;
+ }
+
+ if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
+ return -ENODEV;
+ else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
+ return -EINVAL;
+
+ offset += buf_size;
+ bytes_left -= buf_size;
+ }
+
+ return 0;
+}
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images)
+{
+ u32 drv_mb_param = 0, rsp;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, num_images);
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index)
+{
+ u32 buf_size = 0, param, resp = 0, resp_param = 0;
+ int rc;
+
+ param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
+ param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_BIST_TEST, param,
+ &resp, &resp_param,
+ &buf_size,
+ (u32 *)p_image_att, false);
+ if (rc)
+ return rc;
+
+ if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (p_image_att->return_code != 1))
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvm_image_info nvm_info;
+ struct qed_ptt *p_ptt;
+ int rc;
+ u32 i;
+
+ if (p_hwfn->nvm_info.valid)
+ return 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Acquire from MFW the amount of available images */
+ nvm_info.num_images = 0;
+ rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
+ p_ptt, &nvm_info.num_images);
+ if (rc == -EOPNOTSUPP) {
+ DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
+ goto out;
+ } else if (rc || !nvm_info.num_images) {
+ DP_ERR(p_hwfn, "Failed getting number of images\n");
+ goto err0;
+ }
+
+ nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+ sizeof(struct bist_nvm_image_att),
+ GFP_KERNEL);
+ if (!nvm_info.image_att) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ /* Iterate over images and get their attributes */
+ for (i = 0; i < nvm_info.num_images; i++) {
+ rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
+ &nvm_info.image_att[i], i);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed getting image index %d attributes\n", i);
+ goto err1;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
+ nvm_info.image_att[i].len);
+ }
+out:
+ /* Update hwfn's nvm_info */
+ if (nvm_info.num_images) {
+ p_hwfn->nvm_info.num_images = nvm_info.num_images;
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = nvm_info.image_att;
+ p_hwfn->nvm_info.valid = true;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
+err1:
+ kfree(nvm_info.image_att);
+err0:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = NULL;
+ p_hwfn->nvm_info.valid = false;
+}
+
+int
+qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ struct qed_nvm_image_att *p_image_att)
+{
+ enum nvm_image_type type;
+ int rc;
+ u32 i;
+
+ /* Translate image_id into MFW definitions */
+ switch (image_id) {
+ case QED_NVM_IMAGE_ISCSI_CFG:
+ type = NVM_TYPE_ISCSI_CFG;
+ break;
+ case QED_NVM_IMAGE_FCOE_CFG:
+ type = NVM_TYPE_FCOE_CFG;
+ break;
+ case QED_NVM_IMAGE_MDUMP:
+ type = NVM_TYPE_MDUMP;
+ break;
+ case QED_NVM_IMAGE_NVM_CFG1:
+ type = NVM_TYPE_NVM_CFG1;
+ break;
+ case QED_NVM_IMAGE_DEFAULT_CFG:
+ type = NVM_TYPE_DEFAULT_CFG;
+ break;
+ case QED_NVM_IMAGE_NVM_META:
+ type = NVM_TYPE_NVM_META;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
+ image_id);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
+ if (type == p_hwfn->nvm_info.image_att[i].image_type)
+ break;
+ if (i == p_hwfn->nvm_info.num_images) {
+ DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
+ "Failed to find nvram image of type %08x\n",
+ image_id);
+ return -ENOENT;
+ }
+
+ p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
+ p_image_att->length = p_hwfn->nvm_info.image_att[i].len;
+
+ return 0;
+}
+
+int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len)
+{
+ struct qed_nvm_image_att image_att;
+ int rc;
+
+ memset(p_buffer, 0, buffer_len);
+
+ rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
+ if (rc)
+ return rc;
+
+ /* Validate sizes - both the image's and the supplied buffer's */
+ if (image_att.length <= 4) {
+ DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
+ "Image [%d] is too small - only %d bytes\n",
+ image_id, image_att.length);
+ return -EINVAL;
+ }
+
+ if (image_att.length > buffer_len) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_STORAGE,
+ "Image [%d] is too big - %08x bytes where only %08x are available\n",
+ image_id, image_att.length, buffer_len);
+ return -ENOMEM;
+ }
+
+ return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
+ p_buffer, image_att.length);
+}
+
+static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
+{
+ enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;
+
+ switch (res_id) {
+ case QED_SB:
+ mfw_res_id = RESOURCE_NUM_SB_E;
+ break;
+ case QED_L2_QUEUE:
+ mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
+ break;
+ case QED_VPORT:
+ mfw_res_id = RESOURCE_NUM_VPORT_E;
+ break;
+ case QED_RSS_ENG:
+ mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
+ break;
+ case QED_PQ:
+ mfw_res_id = RESOURCE_NUM_PQ_E;
+ break;
+ case QED_RL:
+ mfw_res_id = RESOURCE_NUM_RL_E;
+ break;
+ case QED_MAC:
+ case QED_VLAN:
+ /* Each VFC resource can accommodate both a MAC and a VLAN */
+ mfw_res_id = RESOURCE_VFC_FILTER_E;
+ break;
+ case QED_ILT:
+ mfw_res_id = RESOURCE_ILT_E;
+ break;
+ case QED_LL2_RAM_QUEUE:
+ mfw_res_id = RESOURCE_LL2_QUEUE_E;
+ break;
+ case QED_LL2_CTX_QUEUE:
+ mfw_res_id = RESOURCE_LL2_CQS_E;
+ break;
+ case QED_RDMA_CNQ_RAM:
+ case QED_CMDQS_CQS:
+ /* CNQ/CMDQS are the same resource */
+ mfw_res_id = RESOURCE_CQS_E;
+ break;
+ case QED_RDMA_STATS_QUEUE:
+ mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
+ break;
+ case QED_BDQ:
+ mfw_res_id = RESOURCE_BDQ_E;
+ break;
+ default:
+ break;
+ }
+
+ return mfw_res_id;
+}
+
+#define QED_RESC_ALLOC_VERSION_MAJOR 2
+#define QED_RESC_ALLOC_VERSION_MINOR 0
+#define QED_RESC_ALLOC_VERSION \
+ ((QED_RESC_ALLOC_VERSION_MAJOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
+ (QED_RESC_ALLOC_VERSION_MINOR << \
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
+
+struct qed_resc_alloc_in_params {
+ u32 cmd;
+ enum qed_resources res_id;
+ u32 resc_max_val;
+};
+
+struct qed_resc_alloc_out_params {
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 resc_num;
+ u32 resc_start;
+ u32 vf_resc_num;
+ u32 vf_resc_start;
+ u32 flags;
+};
+
+static int
+qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_alloc_in_params *p_in_params,
+ struct qed_resc_alloc_out_params *p_out_params)
+{
+ struct qed_mcp_mb_params mb_params;
+ struct resource_info mfw_resc_info;
+ int rc;
+
+ memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));
+
+ mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
+ if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
+ DP_ERR(p_hwfn,
+ "Failed to match resource %d [%s] with the MFW resources\n",
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id));
+ return -EINVAL;
+ }
+
+ switch (p_in_params->cmd) {
+ case DRV_MSG_SET_RESOURCE_VALUE_MSG:
+ mfw_resc_info.size = p_in_params->resc_max_val;
+ fallthrough;
+ case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
+ break;
+ default:
+ DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
+ p_in_params->cmd);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = p_in_params->cmd;
+ mb_params.param = QED_RESC_ALLOC_VERSION;
+ mb_params.p_data_src = &mfw_resc_info;
+ mb_params.data_src_size = sizeof(mfw_resc_info);
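+ /* The MFW returns its response in the same resource_info structure
+ * that carries the request.
+ */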
+ mb_params.p_data_dst = mb_params.p_data_src;
+ mb_params.data_dst_size = mb_params.data_src_size;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
+ p_in_params->cmd,
+ p_in_params->res_id,
+ qed_hw_get_resc_name(p_in_params->res_id),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(mb_params.param,
+ DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_in_params->resc_max_val);
+
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ p_out_params->mcp_resp = mb_params.mcp_resp;
+ p_out_params->mcp_param = mb_params.mcp_param;
+ p_out_params->resc_num = mfw_resc_info.size;
+ p_out_params->resc_start = mfw_resc_info.offset;
+ p_out_params->vf_resc_num = mfw_resc_info.vf_size;
+ p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
+ p_out_params->flags = mfw_resc_info.flags;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
+ QED_MFW_GET_FIELD(p_out_params->mcp_param,
+ FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
+ p_out_params->resc_num,
+ p_out_params->resc_start,
+ p_out_params->vf_resc_num,
+ p_out_params->vf_resc_start, p_out_params->flags);
+
+ return 0;
+}
+
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
+ in_params.res_id = res_id;
+ in_params.resc_max_val = resc_max_val;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ return 0;
+}
+
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
+{
+ struct qed_resc_alloc_out_params out_params;
+ struct qed_resc_alloc_in_params in_params;
+ int rc;
+
+ memset(&in_params, 0, sizeof(in_params));
+ in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
+ in_params.res_id = res_id;
+ memset(&out_params, 0, sizeof(out_params));
+ rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
+ &out_params);
+ if (rc)
+ return rc;
+
+ *p_mcp_resp = out_params.mcp_resp;
+
+ if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
+ *p_resc_num = out_params.resc_num;
+ *p_resc_start = out_params.resc_start;
+ }
+
+ return 0;
+}
+
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param;
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
+ &mcp_resp, &mcp_param);
+}
+
+static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
+{
+ int rc;
+
+ rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD,
+ param, p_mcp_resp, p_mcp_param);
+ if (rc)
+ return rc;
+
+ if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The resource command is unsupported by the MFW\n");
+ return -EINVAL;
+ }
+
+ if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
+ u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);
+
+ DP_NOTICE(p_hwfn,
+ "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
+ param, opcode);
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int
+__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_lock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ switch (p_params->timeout) {
+ case QED_MCP_RESC_LOCK_TO_DEFAULT:
+ opcode = RESOURCE_OPCODE_REQ;
+ p_params->timeout = 0;
+ break;
+ case QED_MCP_RESC_LOCK_TO_NONE:
+ opcode = RESOURCE_OPCODE_REQ_WO_AGING;
+ p_params->timeout = 0;
+ break;
+ default:
+ opcode = RESOURCE_OPCODE_REQ_W_AGING;
+ break;
+ }
+
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
+ param, p_params->timeout, opcode, p_params->resource);
+
+ /* Attempt to acquire the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
+ mcp_param, opcode, p_params->owner);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_GNT:
+ p_params->b_granted = true;
+ break;
+ case RESOURCE_OPCODE_BUSY:
+ p_params->b_granted = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
+{
+ u32 retry_cnt = 0;
+ int rc;
+
+ do {
+ /* No need for an interval before the first iteration */
+ if (retry_cnt) {
+ if (p_params->sleep_b4_retry) {
+ u16 retry_interval_in_ms =
+ DIV_ROUND_UP(p_params->retry_interval,
+ 1000);
+
+ msleep(retry_interval_in_ms);
+ } else {
+ udelay(p_params->retry_interval);
+ }
+ }
+
+ rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
+ if (rc)
+ return rc;
+
+ if (p_params->b_granted)
+ break;
+ } while (retry_cnt++ < p_params->retry_num);
+
+ return 0;
+}
+
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params)
+{
+ u32 param = 0, mcp_resp, mcp_param;
+ u8 opcode;
+ int rc;
+
+ opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
+ : RESOURCE_OPCODE_RELEASE;
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
+ QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
+ param, opcode, p_params->resource);
+
+ /* Attempt to release the resource */
+ rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
+ if (rc)
+ return rc;
+
+ /* Analyze the response */
+ opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
+ mcp_param, opcode);
+
+ switch (opcode) {
+ case RESOURCE_OPCODE_RELEASED_PREVIOUS:
+ DP_INFO(p_hwfn,
+ "Resource unlock request for an already released resource [%d]\n",
+ p_params->resource);
+ fallthrough;
+ case RESOURCE_OPCODE_RELEASED:
+ p_params->b_released = true;
+ break;
+ case RESOURCE_OPCODE_WRONG_OWNER:
+ p_params->b_released = false;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
+ mcp_param, opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+ enum qed_resc_lock
+ resource, bool b_is_permanent)
+{
+ if (p_lock) {
+ memset(p_lock, 0, sizeof(*p_lock));
+
+ /* Permanent resources don't require aging, and there's no
+ * point in trying to acquire them more than once, since it is
+ * not expected that another entity would release them.
+ */
+ if (b_is_permanent) {
+ p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
+ } else {
+ p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
+ p_lock->retry_interval =
+ QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
+ p_lock->sleep_b4_retry = true;
+ }
+
+ p_lock->resource = resource;
+ }
+
+ if (p_unlock) {
+ memset(p_unlock, 0, sizeof(*p_unlock));
+ p_unlock->resource = resource;
+ }
+}
+
+bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
+}
+
+int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 mcp_resp;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
+ 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
+ if (!rc)
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
+ "MFW supported features: %08x\n",
+ p_hwfn->mcp_info->capabilities);
+
+ return rc;
+}
+
+int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 mcp_resp, mcp_param, features;
+
+ features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
+ DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
+ DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
+
+ return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
+ features, &mcp_resp, &mcp_param);
+}
+
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u8 fir_valid, l2_valid;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_engine_config command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
+ if (fir_valid)
+ cdev->fir_affin =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);
+
+ l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
+ if (l2_valid)
+ cdev->l2_affin_hint =
+ QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);
+
+ DP_INFO(p_hwfn,
+ "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
+ fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);
+
+ return 0;
+}
+
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_mb_params mb_params = {0};
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int rc;
+
+ mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The get_ppfid_bitmap command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ }
+
+ cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
+ FW_MB_PARAM_PPFID_BITMAP);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
+ cdev->ppfid_bitmap);
+
+ return 0;
+}
+
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len)
+{
+ u32 mb_param = 0, resp, param;
+ int rc;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, p_len,
+ (u32 *)p_buf, false);
+
+ return rc;
+}
+
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len)
+{
+ u32 mb_param = 0, resp, param;
+
+ QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
+ if (flags & QED_NVM_CFG_OPTION_ALL)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
+ if (flags & QED_NVM_CFG_OPTION_INIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_COMMIT)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
+ if (flags & QED_NVM_CFG_OPTION_FREE)
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
+ if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
+ QED_MFW_SET_FIELD(mb_param,
+ DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
+ entity_id);
+ }
+
+ return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION,
+ mb_param, &resp, &param, len, (u32 *)p_buf);
+}
+
+#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
+#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
+#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
+ (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
+
+static int
+__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
+{
+ struct qed_mcp_mb_params mb_params;
+ int rc;
+
+ if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
+ DP_ERR(p_hwfn,
+ "Debug data size is %d while it should not exceed %d\n",
+ size, QED_MCP_DBG_DATA_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
+ SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
+ mb_params.p_data_src = p_buf;
+ mb_params.data_src_size = size;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ return rc;
+
+ if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
+ DP_INFO(p_hwfn,
+ "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
+ return -EOPNOTSUPP;
+ } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
+ DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
+ return -EBUSY;
+ } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send debug data to the MFW [resp 0x%08x]\n",
+ mb_params.mcp_resp);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum qed_mcp_dbg_data_type {
+ QED_MCP_DBG_DATA_TYPE_RAW,
+};
+
+/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
+#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
+#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
+#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
+#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
+#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
+#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
+#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
+#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
+
+#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
+#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
+
+static int
+qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
+{
+ u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
+ u32 tmp_size = size, *p_header, *p_payload;
+ u8 flags = 0;
+ u16 seq;
+ int rc;
+
+ p_header = (u32 *)raw_data;
+ p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);
+
+ seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);
+
+ /* First chunk is marked as 'first' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+
+ *p_header = 0;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);
+
+ while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
+ memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
+ rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ QED_MCP_DBG_DATA_MAX_SIZE);
+ if (rc)
+ return rc;
+
+ /* Clear the 'first' marking after sending the first chunk */
+ if (p_tmp_buf == p_buf) {
+ flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
+ flags);
+ }
+
+ p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
+ }
+
+ /* Last chunk is marked as 'last' */
+ flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
+ SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
+ memcpy(p_payload, p_tmp_buf, tmp_size);
+
+ /* Casting the remaining size to u8 is ok since at this point it is <= 32 */
+ return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
+ (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
+ tmp_size));
+}
+
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
+{
+ return qed_mcp_send_debug_data(p_hwfn, p_ptt,
+ QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
+}
+
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
+}
+
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
+ return rc;
+ }
+
+ *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
new file mode 100644
index 000000000..9bd0565fe
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -0,0 +1,1396 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_MCP_H
+#define _QED_MCP_H
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_fcoe_if.h>
+#include "qed_hsi.h"
+#include "qed_dev_api.h"
+
+#define QED_MFW_REPORT_STR_SIZE 256
+
+struct qed_mcp_link_speed_params {
+ bool autoneg;
+
+ u32 advertised_speeds;
+#define QED_EXT_SPEED_MASK_RES 0x1
+#define QED_EXT_SPEED_MASK_1G 0x2
+#define QED_EXT_SPEED_MASK_10G 0x4
+#define QED_EXT_SPEED_MASK_20G 0x8
+#define QED_EXT_SPEED_MASK_25G 0x10
+#define QED_EXT_SPEED_MASK_40G 0x20
+#define QED_EXT_SPEED_MASK_50G_R 0x40
+#define QED_EXT_SPEED_MASK_50G_R2 0x80
+#define QED_EXT_SPEED_MASK_100G_R2 0x100
+#define QED_EXT_SPEED_MASK_100G_R4 0x200
+#define QED_EXT_SPEED_MASK_100G_P4 0x400
+
+ u32 forced_speed; /* In Mb/s */
+#define QED_EXT_SPEED_1G 0x1
+#define QED_EXT_SPEED_10G 0x2
+#define QED_EXT_SPEED_20G 0x4
+#define QED_EXT_SPEED_25G 0x8
+#define QED_EXT_SPEED_40G 0x10
+#define QED_EXT_SPEED_50G_R 0x20
+#define QED_EXT_SPEED_50G_R2 0x40
+#define QED_EXT_SPEED_100G_R2 0x80
+#define QED_EXT_SPEED_100G_R4 0x100
+#define QED_EXT_SPEED_100G_P4 0x200
+};
+
+struct qed_mcp_link_pause_params {
+ bool autoneg;
+ bool forced_rx;
+ bool forced_tx;
+};
+
+enum qed_mcp_eee_mode {
+ QED_MCP_EEE_DISABLED,
+ QED_MCP_EEE_ENABLED,
+ QED_MCP_EEE_UNSUPPORTED
+};
+
+struct qed_mcp_link_params {
+ struct qed_mcp_link_speed_params speed;
+ struct qed_mcp_link_pause_params pause;
+ u32 loopback_mode;
+ struct qed_link_eee_params eee;
+ u32 fec;
+
+ struct qed_mcp_link_speed_params ext_speed;
+ u32 ext_fec_mode;
+};
+
+struct qed_mcp_link_capabilities {
+ u32 speed_capabilities;
+ bool default_speed_autoneg;
+ u32 fec_default;
+ enum qed_mcp_eee_mode default_eee;
+ u32 eee_lpi_timer;
+ u8 eee_speed_caps;
+
+ u32 default_ext_speed_caps;
+ u32 default_ext_autoneg;
+ u32 default_ext_speed;
+ u32 default_ext_fec;
+};
+
+struct qed_mcp_link_state {
+ bool link_up;
+ u32 min_pf_rate;
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
+
+ bool full_duplex;
+ bool an;
+ bool an_complete;
+ bool parallel_detection;
+ bool pfc_enabled;
+
+ u32 partner_adv_speed;
+#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0)
+#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1)
+#define QED_LINK_PARTNER_SPEED_10G BIT(2)
+#define QED_LINK_PARTNER_SPEED_20G BIT(3)
+#define QED_LINK_PARTNER_SPEED_25G BIT(4)
+#define QED_LINK_PARTNER_SPEED_40G BIT(5)
+#define QED_LINK_PARTNER_SPEED_50G BIT(6)
+#define QED_LINK_PARTNER_SPEED_100G BIT(7)
+
+ bool partner_tx_flow_ctrl_en;
+ bool partner_rx_flow_ctrl_en;
+
+ u8 partner_adv_pause;
+#define QED_LINK_PARTNER_SYMMETRIC_PAUSE 0x1
+#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE 0x2
+#define QED_LINK_PARTNER_BOTH_PAUSE 0x3
+
+ bool sfp_tx_fault;
+ bool eee_active;
+ u8 eee_adv_caps;
+ u8 eee_lp_adv_caps;
+
+ u32 fec_active;
+};
+
+struct qed_mcp_function_info {
+ u8 pause_on_host;
+
+ enum qed_pci_personality protocol;
+
+ u8 bandwidth_min;
+ u8 bandwidth_max;
+
+ u8 mac[ETH_ALEN];
+
+ u64 wwn_port;
+ u64 wwn_node;
+
+#define QED_MCP_VLAN_UNSET (0xffff)
+ u16 ovlan;
+
+ u16 mtu;
+};
+
+struct qed_mcp_nvm_common {
+ u32 offset;
+ u32 param;
+ u32 resp;
+ u32 cmd;
+};
+
+struct qed_mcp_drv_version {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct qed_mcp_lan_stats {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+};
+
+struct qed_mcp_fcoe_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct qed_mcp_iscsi_stats {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct qed_mcp_rdma_stats {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_byts;
+};
+
+enum qed_mcp_protocol_type {
+ QED_MCP_LAN_STATS,
+ QED_MCP_FCOE_STATS,
+ QED_MCP_ISCSI_STATS,
+ QED_MCP_RDMA_STATS
+};
+
+union qed_mcp_protocol_stats {
+ struct qed_mcp_lan_stats lan_stats;
+ struct qed_mcp_fcoe_stats fcoe_stats;
+ struct qed_mcp_iscsi_stats iscsi_stats;
+ struct qed_mcp_rdma_stats rdma_stats;
+};
+
+enum qed_ov_eswitch {
+ QED_OV_ESWITCH_NONE,
+ QED_OV_ESWITCH_VEB,
+ QED_OV_ESWITCH_VEPA
+};
+
+enum qed_ov_client {
+ QED_OV_CLIENT_DRV,
+ QED_OV_CLIENT_USER,
+ QED_OV_CLIENT_VENDOR_SPEC
+};
+
+enum qed_ov_driver_state {
+ QED_OV_DRIVER_STATE_NOT_LOADED,
+ QED_OV_DRIVER_STATE_DISABLED,
+ QED_OV_DRIVER_STATE_ACTIVE
+};
+
+enum qed_ov_wol {
+ QED_OV_WOL_DEFAULT,
+ QED_OV_WOL_DISABLED,
+ QED_OV_WOL_ENABLED
+};
+
+enum qed_mfw_tlv_type {
+ QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */
+ QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */
+ QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */
+ QED_MFW_TLV_ISCSI = 0x8, /* iSCSI protocol TLVs */
+ QED_MFW_TLV_MAX = 0x16,
+};
+
+struct qed_mfw_tlv_generic {
+#define QED_MFW_TLV_FLAGS_SIZE 2
+ struct {
+ u8 ipv4_csum_offload;
+ u8 lso_supported;
+ bool b_set;
+ } flags;
+
+#define QED_MFW_TLV_MAC_COUNT 3
+ /* First entry for primary MAC, 2 secondary MACs possible */
+ u8 mac[QED_MFW_TLV_MAC_COUNT][6];
+ bool mac_set[QED_MFW_TLV_MAC_COUNT];
+
+ u64 rx_frames;
+ bool rx_frames_set;
+ u64 rx_bytes;
+ bool rx_bytes_set;
+ u64 tx_frames;
+ bool tx_frames_set;
+ u64 tx_bytes;
+ bool tx_bytes_set;
+};
+
+union qed_mfw_tlv_data {
+ struct qed_mfw_tlv_generic generic;
+ struct qed_mfw_tlv_eth eth;
+ struct qed_mfw_tlv_fcoe fcoe;
+ struct qed_mfw_tlv_iscsi iscsi;
+};
+
+#define QED_NVM_CFG_OPTION_ALL BIT(0)
+#define QED_NVM_CFG_OPTION_INIT BIT(1)
+#define QED_NVM_CFG_OPTION_COMMIT BIT(2)
+#define QED_NVM_CFG_OPTION_FREE BIT(3)
+#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
+
+/**
+ * qed_mcp_get_link_params(): Returns the link params of the hw function.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Returns: Pointer to link params.
+ */
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_link_state(): Return the link state of the hw function.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Returns: Pointer to link state.
+ */
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_link_capabilities(): Return the link capabilities of the
+ * hw function.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Returns: Pointer to link capabilities.
+ */
+struct qed_mcp_link_capabilities
+ *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_set_link(): Request the MFW to set the link according
+ * to 'link_input'.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_up: Raise link if `true'. Reset link if `false'.
+ *
+ * Return: Int.
+ */
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool b_up);
+
+/**
+ * qed_mcp_get_mfw_ver(): Get the management firmware version value.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mfw_ver: MFW version value.
+ * @p_running_bundle_id: Image id in nvram; Optional.
+ *
+ * Return: Int - 0 - operation was successful.
+ */
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id);
+
+/**
+ * qed_mcp_get_mbi_ver(): Get the MBI version value.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
+ *
+ * Return: Int - 0 - operation was successful.
+ */
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_mbi_ver);
+
+/**
+ * qed_mcp_get_media_type(): Get media type value of the port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @media_type: Media type value
+ *
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *media_type);
+
+/**
+ * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_transceiver_state: Transceiver state.
+ * @p_tranceiver_type: Media type value.
+ *
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_transceiver_state,
+ u32 *p_tranceiver_type);
+
+/**
+ * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_speed_mask: Bit mask of all supported speeds.
+ *
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+
+int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_speed_mask);
+
+/**
+ * qed_mcp_get_board_config(): Get board configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_board_config: Board config.
+ *
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
+ */
+int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *p_board_config);
+
+/**
+ * qed_mcp_cmd(): Sleepable function for sending commands to the MCP
+ * mailbox. It acquires a lock for the entire
+ * operation, from sending the request until the MCP
+ * response arrives. The MCP response is polled every
+ * 10 ms for up to 5 seconds. Must not be called from
+ * atomic context.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: command to be sent to the MCP.
+ * @param: Optional param
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
+ * response
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
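+
+/* Illustrative usage sketch (assumes a valid p_hwfn/p_ptt pair); a typical
+ * mailbox exchange follows the same pattern as qed_mcp_get_esl_status() in
+ * qed_mcp.c:
+ *
+ * u32 resp = 0, param = 0;
+ * int rc;
+ *
+ * rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS,
+ * 0, &resp, &param);
+ * if (rc)
+ * return rc;
+ *
+ * On success, resp holds the FW_MSG_CODE_* response and param holds the
+ * optional data returned by the MFW.
+ */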
+
+/**
+ * qed_mcp_cmd_nosleep(): Function for sending commands to the MCP
+ * mailbox. It acquires a lock for the entire
+ * operation, from sending the request until the MCP
+ * response arrives. The MCP response is polled every
+ * 10 us for up to 5 seconds. Should be used when
+ * sleeping is not allowed.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: command to be sent to the MCP.
+ * @param: Optional param
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
+ * response
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param);
+
+/**
+ * qed_mcp_drain(): Drains the NIG, allowing completion to pass in
+ * case of pauses.
+ * (Should be called only from sleepable context)
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
+ */
+int qed_mcp_drain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_get_flash_size(): Get the flash size value.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_flash_size: Flash size in bytes to be filled.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * qed_mcp_send_drv_version(): Send driver version to MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_ver: Version value.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_drv_version *p_ver);
+
+/**
+ * qed_get_process_kill_counter(): Read the MFW process kill counter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: u32.
+ */
+u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_start_recovery_process(): Trigger a recovery process.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
+ */
+int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_recovery_prolog(): A recovery handler must call this function
+ * as its first step.
+ * It is assumed that the handler is not run from
+ * an interrupt context.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: int.
+ */
+int qed_recovery_prolog(struct qed_dev *cdev);
+
+/**
+ * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
+ * device properties
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @client: Qed client type.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_client client);
+
+/**
+ * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @drv_state: Driver state.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_driver_state drv_state);
+
+/**
+ * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mtu: MTU size.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 mtu);
+
+/**
+ * qed_mcp_ov_update_mac(): Send MAC address to MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mac: MAC address.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const u8 *mac);
+
+/**
+ * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @wol: WOL mode.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_wol wol);
+
+/**
+ * qed_mcp_set_led(): Set LED status.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mode: LED mode.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_led_mode mode);
+
+/**
+ * qed_mcp_nvm_read(): Read from NVM.
+ *
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @p_buf: NVM read buffer.
+ * @len: Buffer len.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * qed_mcp_nvm_write(): Write to NVM.
+ *
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @cmd: NVM command.
+ * @p_buf: NVM write buffer.
+ * @len: Buffer len.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_nvm_write(struct qed_dev *cdev,
+ u32 cmd, u32 addr, u8 *p_buf, u32 len);
+
+/**
+ * qed_mcp_nvm_resp(): Check latest response.
+ *
+ * @cdev: Qed dev pointer.
+ * @p_buf: NVM write buffer.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
+
+struct qed_nvm_image_att {
+ u32 start_addr;
+ u32 length;
+};
+
+/**
+ * qed_mcp_get_nvm_image_att(): Gets the attributes of an nvram image.
+ *
+ * @p_hwfn: HW device data.
+ * @image_id: Image to get attributes for.
+ * @p_image_att: Image attributes structure into which to fill data.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ struct qed_nvm_image_att *p_image_att);
+
+/**
+ * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
+ *
+ * @p_hwfn: HW device data.
+ * @image_id: image requested for reading.
+ * @p_buffer: allocated buffer into which to fill data.
+ * @buffer_len: length of the allocated buffer.
+ *
+ * Return: 0 if p_buffer now contains the nvram image.
+ */
+int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
+ enum qed_nvm_images image_id,
+ u8 *p_buffer, u32 buffer_len);
+
+/**
+ * qed_mcp_bist_register_test(): Bist register test.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_bist_clock_test(): Bist clock test.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @num_images: number of images if operation was
+ * successful. 0 if not.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *num_images);
+
+/**
+ * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
+ * by index.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_image_att: Attributes of image.
+ * @image_index: Index of image to get information for.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct bist_nvm_image_att *p_image_att,
+ u32 image_index);
+
+/**
+ * qed_mfw_process_tlv_req(): Processes the TLV request from the MFW, i.e.,
+ * gets the required TLV info
+ * from the qed client and sends it to the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: 0 upon success.
+ */
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_buf: raw debug data buffer.
+ * @size: Buffer size.
+ *
+ * Return: Int.
+ */
+int
+qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_buf, u32 size);
+
+/* Using the hwfn number (and not pf_num) is required since in CMT mode,
+ * the same pf_num may be used by two different hwfns.
+ * TODO - this shouldn't really be in a .h file, but until all fields
+ * required during hw-init are placed in their correct place in shmem,
+ * we need it in qed_dev.c [for reading the nvram reflection in shmem].
+ */
+#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \
+ ((rel_pfid) | \
+ ((p_hwfn)->abs_pf_id & 1) << 3) : \
+ rel_pfid)
+#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
+
+struct qed_mcp_info {
+ /* List for mailbox commands which were sent and wait for a response */
+ struct list_head cmd_list;
+
+ /* Spinlock used for protecting the access to the mailbox commands list
+ * and the sending of the commands.
+ */
+ spinlock_t cmd_lock;
+
+ /* Flag to indicate whether sending a MFW mailbox command is blocked */
+ bool b_block_cmd;
+
+ /* Spinlock used for syncing SW link-changes and link-changes
+ * originating from attention context.
+ */
+ spinlock_t link_lock;
+
+ u32 public_base;
+ u32 drv_mb_addr;
+ u32 mfw_mb_addr;
+ u32 port_addr;
+ u16 drv_mb_seq;
+ u16 drv_pulse_seq;
+ struct qed_mcp_link_params link_input;
+ struct qed_mcp_link_state link_output;
+ struct qed_mcp_link_capabilities link_capabilities;
+ struct qed_mcp_function_info func_info;
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u32 mcp_hist;
+
+ /* Capabilities negotiated with the MFW */
+ u32 capabilities;
+
+ /* S/N for debug data mailbox commands */
+ atomic_t dbg_data_seq;
+
+ /* Spinlock used to sync the flag mcp_handling_status with
+ * the mfw events handler
+ */
+ spinlock_t unload_lock;
+ unsigned long mcp_handling_status;
+#define QED_MCP_BYPASS_PROC_BIT 0
+#define QED_MCP_IN_PROCESSING_BIT 1
+};
+
+struct qed_mcp_mb_params {
+ u32 cmd;
+ u32 param;
+ void *p_data_src;
+ void *p_data_dst;
+ u8 data_src_size;
+ u8 data_dst_size;
+ u32 mcp_resp;
+ u32 mcp_param;
+ u32 flags;
+#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0)
+#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1)
+#define QED_MB_FLAGS_IS_SET(params, flag) \
+ ({ typeof(params) __params = (params); \
+ (__params && (__params->flags & QED_MB_FLAG_ ## flag)); })
+};
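+
+/* Illustrative sketch of filling the mailbox-parameters struct (the command
+ * and flag chosen here are only an example; DRV_MSG_CODE_GET_ENGINE_CONFIG is
+ * one of the commands used by qed_mcp.c):
+ *
+ * struct qed_mcp_mb_params mb_params = {0};
+ * bool can_sleep;
+ *
+ * mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
+ * mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
+ * can_sleep = QED_MB_FLAGS_IS_SET(&mb_params, CAN_SLEEP);
+ */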
+
+struct qed_drv_tlv_hdr {
+ u8 tlv_type;
+ u8 tlv_length; /* In dwords - not including this header */
+ u8 tlv_reserved;
+#define QED_DRV_TLV_FLAGS_CHANGED 0x01
+ u8 tlv_flags;
+};
+
+/**
+ * qed_mcp_is_ext_speed_supported() - Check if management firmware supports
+ * extended speeds.
+ * @p_hwfn: HW device data.
+ *
+ * Return: true if supported, false otherwise.
+ */
+static inline bool
+qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
+{
+ return !!(p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL);
+}
+
+/**
+ * qed_mcp_cmd_init(): Initialize the interface with the MCP.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
+ */
+int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ *
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+/**
+ * qed_mcp_free(): Releases resources allocated during the init process.
+ *
+ * @p_hwfn: HW function.
+ *
+ * Return: Int.
+ */
+
+int qed_mcp_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_handle_events(): This function is called from the DPC context.
+ * After pointing the PTT to the MFW mailbox, check for events sent by
+ * the MCP to the driver and ack them. If a critical event is
+ * detected, it is handled here; otherwise the work is
+ * queued to a sleepable work-queue.
+ *
+ * @p_hwfn: HW function.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+enum qed_drv_role {
+ QED_DRV_ROLE_OS,
+ QED_DRV_ROLE_KDUMP,
+};
+
+struct qed_load_req_params {
+ /* Input params */
+ enum qed_drv_role drv_role;
+ u8 timeout_val;
+ bool avoid_eng_reset;
+ enum qed_override_force_load override_force_load;
+
+ /* Output params */
+ u32 load_code;
+};
+
+/**
+ * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
+ * operation succeeds, returns whether this PF is
+ * the first on the engine/port or function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_load_req_params *p_params);
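+
+/* Illustrative caller sketch (assumes a valid p_hwfn/p_ptt pair; the driver
+ * role chosen here is only an example):
+ *
+ * struct qed_load_req_params load_req_params;
+ * u32 load_code;
+ * int rc;
+ *
+ * memset(&load_req_params, 0, sizeof(load_req_params));
+ * load_req_params.drv_role = QED_DRV_ROLE_OS;
+ * rc = qed_mcp_load_req(p_hwfn, p_ptt, &load_req_params);
+ * if (rc)
+ * return rc;
+ * load_code = load_req_params.load_code;
+ */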
+
+/**
+ * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ */
+void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_ack_vf_flr(): Ack to the MFW that the driver has finished the FLR process for VFs.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vfs_to_ack: bit mask of all engine VFs for which the PF acks.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
+/**
+ * qed_mcp_fill_shmem_func_info(): Called during init to read shmem of
+ * all function-related info.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: 0 upon success.
+ */
+int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_reset(): Reset the MCP using mailbox command.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: 0 upon success.
+ */
+int qed_mcp_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
+ * a buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands.
+ * @param: [0:23] - Offset [24:31] - Size.
+ * @o_mcp_resp: MCP response.
+ * @o_mcp_param: MCP response param.
+ * @o_txn_size: Buffer size output.
+ * @o_buf: Pointer to the buffer returned by the MFW.
+ * @b_can_sleep: Can sleep.
+ *
+ * Return: 0 upon success.
+ */
+int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 cmd,
+ u32 param,
+ u32 *o_mcp_resp,
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);
+
+/**
+ * qed_mcp_phy_sfp_read(): Read from sfp.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @port: transceiver port.
+ * @addr: I2C address.
+ * @offset: offset in sfp.
+ * @len: buffer length.
+ * @p_buf: buffer to read into.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
+
+/**
+ * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
+ * are accessible.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: true if MFW is running and mcp_info is initialized.
+ */
+bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @vf_id: absolute inside engine.
+ * @num: number of entries to request.
+ *
+ * Return: Int.
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+/**
+ * qed_mcp_halt(): Halt the MCP.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: 0 upon success.
+ */
+int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_resume(): Wake up the MCP.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: 0 upon success.
+ */
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw);
+
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities);
+
+/**
+ * qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mdump_retain: mdump retain.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct mdump_retain_data_stc *p_mdump_retain);
+
+/**
+ * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: Resource ID.
+ * @resc_max_val: Resource max value.
+ * @p_mcp_resp: MCP response.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 resc_max_val, u32 *p_mcp_resp);
+
+/**
+ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
+ * resource.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @res_id: Resource ID.
+ * @p_mcp_resp: MCP response.
+ * @p_resc_num: Resource num.
+ * @p_resc_start: Resource start.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_resources res_id,
+ u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
+
+/**
+ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @eswitch: eswitch mode.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_eswitch eswitch);
+
+#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP
+#define QED_MCP_RESC_LOCK_MAX_VAL 31
+
+enum qed_resc_lock {
+ QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL,
+ QED_RESC_LOCK_PTP_PORT0,
+ QED_RESC_LOCK_PTP_PORT1,
+ QED_RESC_LOCK_PTP_PORT2,
+ QED_RESC_LOCK_PTP_PORT3,
+ QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL,
+ QED_RESC_LOCK_RESC_INVALID
+};
+
+/**
+ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+struct qed_resc_lock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Lock timeout value in seconds [default, none or 1..254] */
+ u8 timeout;
+#define QED_MCP_RESC_LOCK_TO_DEFAULT 0
+#define QED_MCP_RESC_LOCK_TO_NONE 255
+
+ /* Number of times to retry locking */
+ u8 retry_num;
+#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10
+
+ /* The interval in usec between retries */
+ u16 retry_interval;
+#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000
+
+ /* Use sleep or delay between retries */
+ bool sleep_b4_retry;
+
+ /* Will be set as true if the resource is free and granted */
+ bool b_granted;
+
+ /* Will be filled with the resource owner.
+ * [0..15 = PF0-15, 16 = MFW]
+ */
+ u8 owner;
+};
+
+/**
+ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params);
+
+struct qed_resc_unlock_params {
+ /* Resource number [valid values are 0..31] */
+ u8 resource;
+
+ /* Allow releasing a resource even if it belongs to another PF */
+ bool b_force;
+
+ /* Will be set as true if the resource is released */
+ bool b_released;
+};
+
+/**
+ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_params: Params.
+ *
+ * Return: Int - 0 - Operation was successful.
+ */
+int
+qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_resc_unlock_params *p_params);
+
+/**
+ * qed_mcp_resc_lock_default_init(): Default initialization for
+ * lock/unlock resource structs.
+ *
+ * @p_lock: lock params struct to be initialized; Can be NULL.
+ * @p_unlock: unlock params struct to be initialized; Can be NULL.
+ * @resource: the requested resource.
+ * @b_is_permanent: disable retries & aging when set.
+ *
+ * Return: Void.
+ */
+void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
+ struct qed_resc_unlock_params *p_unlock,
+ enum qed_resc_lock
+ resource, bool b_is_permanent);
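+
+/* Illustrative lock/unlock flow (a sketch; the resource chosen and the error
+ * policy are only an example):
+ *
+ * struct qed_resc_lock_params lock_params;
+ * struct qed_resc_unlock_params unlock_params;
+ * int rc;
+ *
+ * qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
+ * QED_RESC_LOCK_PTP_PORT0, false);
+ * rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params);
+ * if (rc || !lock_params.b_granted)
+ * return rc ? rc : -EBUSY;
+ *
+ * ... access the MFW-arbitrated resource ...
+ *
+ * rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
+ */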
+
+/**
+ * qed_mcp_is_smart_an_supported(): Return whether management firmware
+ * supports smart AN.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: bool true if feature is supported.
+ */
+bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_capabilities(): Learn the features supported by the MFW;
+ * to be done during early init.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_set_capabilities(): Inform the MFW of the set of features
+ * supported by the driver. Should be done as
+ * part of the LOAD_REQ flow.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
+ */
+void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_engine_config(): Get the engine affinity configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
+ */
+int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buf.
+ * @p_len: Len.
+ *
+ * Return: Int.
+ */
+int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 *p_len);
+
+/**
+ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buf.
+ * @len: Len.
+ *
+ * Return: Int.
+ */
+int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
+ u32 len);
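+
+/* Illustrative flag composition (a sketch; option_id, entity_id and the
+ * buffer size are placeholders, not real NVM option values):
+ *
+ * u8 buf[8];
+ * u32 len = sizeof(buf);
+ * u16 flags = QED_NVM_CFG_OPTION_INIT | QED_NVM_CFG_OPTION_FREE;
+ * int rc;
+ *
+ * rc = qed_mcp_nvm_get_cfg(p_hwfn, p_ptt, option_id, 0, flags, buf, &len);
+ * if (!rc)
+ * rc = qed_mcp_nvm_set_cfg(p_hwfn, p_ptt, option_id, 0,
+ * flags | QED_NVM_CFG_OPTION_COMMIT, buf, len);
+ */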
+
+/**
+ * qed_mcp_is_esl_supported(): Return whether the management firmware supports ESL or not.
+ *
+ * @p_hwfn: hw function pointer
+ *
+ * Return: true if ESL is supported, false otherwise.
+ */
+bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_mcp_get_esl_status(): Get enhanced system lockdown status
+ *
+ * @p_hwfn: hw function pointer
+ * @p_ptt: ptt resource pointer
+ * @active: ESL active status data pointer
+ *
+ * Return: 0 on success, with the ESL status returned via @active; error otherwise.
+ */
+int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
new file mode 100644
index 000000000..6459dd3fe
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -0,0 +1,2475 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_MFW_HSI_H
+#define _QED_MFW_HSI_H
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Help to identify that the trace is valid */
+ u32 size; /* the size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
+
+#define VF_MAX_STATIC 192
+#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32)
+#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+
+#define EXT_VF_MAX_STATIC 240
+#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1)
+#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+#define ADDED_VF_BITMAP_SIZE 2
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+#define SECTION_OFFSET(_offsize) (((((_offsize) & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET((_offsize)) + \
+ (QED_SECTION_SIZE((_offsize)) * (idx)))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ ((_pub_base) + offsetof(struct mcp_public_data, sections[_section]))
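+
+/* Worked example with a hypothetical offsize value: for _offsize = 0x00140030,
+ * SECTION_OFFSET() yields 0x30 dwords = 0xc0 bytes from the scratchpad base,
+ * QED_SECTION_SIZE() yields 0x14 dwords = 0x50 bytes per element, so
+ * SECTION_ADDR(_offsize, 2) = MCP_REG_SCRATCH + 0xc0 + 2 * 0x50.
+ */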
+
+/* PHY configuration */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0x0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE 0x0
+#define ETH_LOOPBACK_INT_PHY 0x1
+#define ETH_LOOPBACK_EXT_PHY 0x2
+#define ETH_LOOPBACK_EXT 0x3
+#define ETH_LOOPBACK_MAC 0x4
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
+#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
+
+ u32 eee_cfg;
+#define EEE_CFG_EEE_ENABLED BIT(0)
+#define EEE_CFG_TX_LPI BIT(1)
+#define EEE_CFG_ADV_SPEED_1G BIT(2)
+#define EEE_CFG_ADV_SPEED_10G BIT(3)
+#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
+#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
+
+ u32 link_modes;
+
+ u32 fec_mode;
+#define FEC_FORCE_MODE_MASK 0x000000ff
+#define FEC_FORCE_MODE_OFFSET 0
+#define FEC_FORCE_MODE_NONE 0x00
+#define FEC_FORCE_MODE_FIRECODE 0x01
+#define FEC_FORCE_MODE_RS 0x02
+#define FEC_FORCE_MODE_AUTO 0x07
+#define FEC_EXTENDED_MODE_MASK 0xffffff00
+#define FEC_EXTENDED_MODE_OFFSET 8
+#define ETH_EXT_FEC_NONE 0x00000000
+#define ETH_EXT_FEC_10G_NONE 0x00000100
+#define ETH_EXT_FEC_10G_BASE_R 0x00000200
+#define ETH_EXT_FEC_25G_NONE 0x00000400
+#define ETH_EXT_FEC_25G_BASE_R 0x00000800
+#define ETH_EXT_FEC_25G_RS528 0x00001000
+#define ETH_EXT_FEC_40G_NONE 0x00002000
+#define ETH_EXT_FEC_40G_BASE_R 0x00004000
+#define ETH_EXT_FEC_50G_NONE 0x00008000
+#define ETH_EXT_FEC_50G_BASE_R 0x00010000
+#define ETH_EXT_FEC_50G_RS528 0x00020000
+#define ETH_EXT_FEC_50G_RS544 0x00040000
+#define ETH_EXT_FEC_100G_NONE 0x00080000
+#define ETH_EXT_FEC_100G_BASE_R 0x00100000
+#define ETH_EXT_FEC_100G_RS528 0x00200000
+#define ETH_EXT_FEC_100G_RS544 0x00400000
+
+ u32 extended_speed;
+#define ETH_EXT_SPEED_MASK 0x0000ffff
+#define ETH_EXT_SPEED_OFFSET 0
+#define ETH_EXT_SPEED_NONE 0x00000001
+#define ETH_EXT_SPEED_1G 0x00000002
+#define ETH_EXT_SPEED_10G 0x00000004
+#define ETH_EXT_SPEED_25G 0x00000008
+#define ETH_EXT_SPEED_40G 0x00000010
+#define ETH_EXT_SPEED_50G_BASE_R 0x00000020
+#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040
+#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080
+#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100
+#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200
+#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000
+#define ETH_EXT_ADV_SPEED_OFFSET 16
+#define ETH_EXT_ADV_SPEED_1G 0x00010000
+#define ETH_EXT_ADV_SPEED_10G 0x00020000
+#define ETH_EXT_ADV_SPEED_25G 0x00040000
+#define ETH_EXT_ADV_SPEED_40G 0x00080000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000
+#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000
+};
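+
+/* Illustrative field access (a sketch; phy_cfg is a hypothetical local of
+ * type struct eth_phy_cfg). The _MASK and _OFFSET define pairs above are
+ * used as plain shift-and-mask accessors:
+ *
+ * u32 force_mode = (phy_cfg.fec_mode & FEC_FORCE_MODE_MASK) >>
+ * FEC_FORCE_MODE_OFFSET;
+ *
+ * phy_cfg.extended_speed |= ETH_EXT_ADV_SPEED_25G;
+ */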
+
+struct port_mf_cfg {
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
+ u64 txpf;
+ u64 txpp;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
+};
+
+struct pkt_type_cnt {
+ u64 tc_tx_pkt_cnt[8];
+ u64 tc_tx_oct_cnt[8];
+ u64 priority_rx_pkt_cnt[8];
+ u64 priority_rx_oct_cnt[8];
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct eth_stats eth;
+};
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define MAX_TLV_BUFFER 128
+
+enum _lldp_agent {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+ struct dcbx_ets_feature ets;
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
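
dscp_pri_map[] maps the 64 DSCP codepoints to priorities, several codepoints per dword. The sketch below assumes the common packing of 4 bits per DSCP value, eight values per dword, lowest nibble first; the helper is illustrative only:

/* Illustrative only: assumes 4 bits per DSCP value, eight values per dword. */
static u8 dcb_dscp_to_pri(const struct dcb_dscp_map *map, u8 dscp)
{
        u32 dword = map->dscp_pri_map[dscp >> 3];       /* dscp must be < 64 */

        return (dword >> ((dscp & 7) * 4)) & 0xf;
}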
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct generic_idc_msg_s {
+ u32 source_pf;
+ struct mcp_val64 msg;
+};
+
+struct pcie_stats_stc {
+ u32 sr_cnt_wr_byte_msb;
+ u32 sr_cnt_wr_byte_lsb;
+ u32 sr_cnt_wr_cnt;
+ u32 sr_cnt_rd_byte_msb;
+ u32 sr_cnt_rd_byte_lsb;
+ u32 sr_cnt_rd_cnt;
+};
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+ u32 ext_phy_upgrade_fw;
+ u8 runtime_port_swap_map[MODE_4P];
+ u32 data_ptr;
+ u32 data_size;
+ u32 bmb_error_status_cnt;
+ u32 bmb_jumbo_frame_cnt;
+ u32 sent_to_bmc_cnt;
+ u32 handled_by_mfw;
+ u32 sent_to_nw_cnt;
+ u32 to_bmc_kb_per_second;
+ u32 bcast_dropped_to_bmc_cnt;
+ u32 mcast_dropped_to_bmc_cnt;
+ u32 ucast_dropped_to_bmc_cnt;
+ u32 ncsi_response_failure_cnt;
+ u32 device_attr;
+ u32 vpd_warning;
+};
+
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit))
+};
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct pause_flood_monitor {
+ u8 period_cnt;
+ u8 any_brb_prs_packet_hist;
+ u8 any_brb_block_is_full_hist;
+ u8 flags;
+ u32 num_of_state_changes;
+};
+
+struct public_port {
+ u32 validity_map;
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30)
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
+
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+
+ u32 eee_status;
+#define EEE_ACTIVE_BIT BIT(0)
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+#define EEE_1G_ADV BIT(1)
+#define EEE_10G_ADV BIT(2)
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+#define EEE_1G_SUPPORTED BIT(1)
+#define EEE_10G_SUPPORTED BIT(2)
+
+ u32 eee_remote;
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_OFFSET 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
+ u32 phy_module_temperature;
+ u32 nig_reg_stat_rx_bmb_packet;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask_2;
+ struct pause_flood_monitor pause_flood_monitor;
+ u32 nig_drain_cnt;
+ struct pkt_type_cnt pkt_tc_priority_cnt;
+};
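
link_status is a packed status word: bit 0 reports link up, a 4-bit speed/duplex code sits at bits 1-4, and the negotiated FEC mode occupies bits 27-29 per the defines above. An illustrative decoder (hypothetical helper, not the driver's own parser) might look like:

/* Illustrative only: minimal link_status decode. */
static void port_decode_link(u32 link_status, bool *up, u32 *speed_mbps)
{
        *up = !!(link_status & LINK_STATUS_LINK_UP);

        switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                *speed_mbps = 1000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                *speed_mbps = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                *speed_mbps = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                *speed_mbps = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                *speed_mbps = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                *speed_mbps = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                *speed_mbps = 100000;
                break;
        default:
                *speed_mbps = 0;
                break;
        }
}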
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 mtu_size;
+
+ u32 c2s_pcp_map_lower;
+ u32 c2s_pcp_map_upper;
+ u32 c2s_pcp_map_default;
+
+ struct generic_idc_msg_s generic_idc_msg;
+
+ u32 num_of_msix;
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation;
+
+ u32 preserve_data;
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define LOAD_REQ_HSI_VERSION 2
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
+
+ struct drv_version_stc drv_ver;
+};
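
The permanent MAC is split across mac_upper (only the low 16 bits are valid) and mac_lower. A sketch of reassembling it into a 6-byte array; the most-significant-byte-first ordering shown is an assumption for illustration, and the helper name is hypothetical:

/* Illustrative only: rebuild the 6-byte MAC from the split dwords. */
static void func_mac_to_bytes(const struct public_func *fn, u8 mac[6])
{
        u16 upper = fn->mac_upper & FUNC_MF_CFG_UPPERMAC_MASK;

        mac[0] = upper >> 8;
        mac[1] = upper & 0xff;
        mac[2] = fn->mac_lower >> 24;
        mac[3] = (fn->mac_lower >> 16) & 0xff;
        mac[4] = (fn->mac_lower >> 8) & 0xff;
        mac[5] = fn->mac_lower & 0xff;
}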
+
+struct mcp_mac {
+ u32 mac_upper;
+ u32 mac_lower;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+struct fcoe_cap_stc {
+ u32 max_ios;
+ u32 max_log;
+ u32 max_exch;
+ u32 max_npiv;
+ u32 max_tgt;
+ u32 max_outstnd;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT BIT(0)
+};
+
+struct mcp_wwn {
+ u32 wwn_upper;
+ u32 wwn_lower;
+};
+
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
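
misc0 of a load request carries the driver role, the MFW lock timeout and the FLAGS0 bits behind the masks above. A hedged example of composing it for an OS driver (whether a real driver sets AVOID_RESET depends on context; the helper is illustrative only):

/* Illustrative only: compose misc0 for an OS driver load request. */
static u32 build_load_req_misc0(void)
{
        u32 misc0 = 0;

        misc0 |= DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT;
        misc0 |= LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT;
        /* Optionally ask the MFW to avoid a full reset on this load. */
        misc0 |= LOAD_REQ_FLAGS0_AVOID_RESET << LOAD_REQ_FLAGS0_SHIFT;

        return misc0;
}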
+
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
+struct lldp_stats_stc {
+ u32 tx_frames_total;
+ u32 rx_frames_total;
+ u32 rx_frames_discarded;
+ u32 rx_age_outs;
+};
+
+struct get_att_ctrl_stc {
+ u32 disabled_attns;
+ u32 controllable_attns;
+};
+
+struct trace_filter_stc {
+ u32 level;
+ u32 modules;
+};
+
+union drv_union_data {
+ struct mcp_mac wol_mac;
+
+ struct eth_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64;
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc iscsi_stats;
+ struct rdma_stats_stc rdma_stats;
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
+ struct mcp_mac lldp_mac;
+ struct mcp_wwn fcoe_fabric_name;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
+ struct lldp_stats_stc lldp_stats;
+ struct pcie_stats_stc pcie_stats;
+
+ struct get_att_ctrl_stc get_att_ctrl;
+ struct fcoe_cap_stc fcoe_cap;
+ struct trace_filter_stc trace_filter;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_SEQ_NUMBER_OFFSET 0
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_OFFSET 16
+
+ u32 drv_mb_param;
+
+ u32 fw_mb_header;
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_SEQ_NUMBER_OFFSET 0
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_OFFSET 16
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
+
+#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET)
+enum drv_msg_code_enum {
+ DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001),
+ DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002),
+ DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003),
+ DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005),
+ DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006),
+ DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009),
+ DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f),
+ DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010),
+ DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011),
+ DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012),
+ DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013),
+ DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016),
+ DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a),
+ DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e),
+ DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020),
+ DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023),
+ DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025),
+ DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b),
+ DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e),
+ DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f),
+ DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030),
+ DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031),
+ DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037),
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e),
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f),
+ DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201),
+ DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000),
+ DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100),
+ DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200),
+ DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300),
+ DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000),
+ DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100),
+ DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200),
+ DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300),
+ DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500),
+ DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600),
+ DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700),
+ DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800),
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900),
+ DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000),
+ DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100),
+ DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200),
+ DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300),
+ DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400),
+ DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500),
+ DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800),
+ DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900),
+ DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00),
+ DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100),
+ DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300),
+ DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000),
+ DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
+ DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
+ DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+ DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007),
+};
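
The command codes above are already shifted into the upper half of drv_mb_header, so composing a header only requires OR-ing in the 16-bit sequence number. An illustrative sketch (hypothetical helper):

/* Illustrative only: compose drv_mb_header for a LOAD_REQ command. */
static u32 drv_mb_compose_header(u16 seq)
{
        return DRV_MSG_CODE_LOAD_REQ |
               ((seq << DRV_MSG_SEQ_NUMBER_OFFSET) & DRV_MSG_SEQ_NUMBER_MASK);
}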
+
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+
+/* DRV_MSG_CODE_RETAIN_VMAC parameters */
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf
+
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4
+
+#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce
+
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_OFFSET 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_OFFSET 8
+
+#define DRV_MSG_FAN_FAILURE_TYPE BIT(0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1)
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
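
A RESOURCE_CMD request packs the resource number, an opcode and an optional aging time into the parameter dword. An illustrative request builder under those assumptions (the helper name is hypothetical):

/* Illustrative only: build a resource-lock request with aging. */
static u32 pack_resource_cmd_req(u8 resc, u8 age_sec)
{
        return ((resc << RESOURCE_CMD_REQ_RESC_SHIFT) &
                RESOURCE_CMD_REQ_RESC_MASK) |
               ((RESOURCE_OPCODE_REQ_W_AGING << RESOURCE_CMD_REQ_OPCODE_SHIFT) &
                RESOURCE_CMD_REQ_OPCODE_MASK) |
               ((age_sec << RESOURCE_CMD_REQ_AGE_SHIFT) &
                RESOURCE_CMD_REQ_AGE_MASK);
}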
+
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+
+#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b
+#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c
+#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d
+
+/* DRV_MSG_CODE_MDUMP_CMD options */
+#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100
+
+/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+
+/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */
+#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0
+#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF
+#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8
+#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300
+#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16
+#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000
+
+/* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+/* UNLOAD_DONE params */
+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+/* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+/* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
+ DRV_MB_PARAM_WOL_DISABLED | \
+ DRV_MB_PARAM_WOL_ENABLED)
+#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
+#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
+#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
+
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
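
A TRANSCEIVER_READ request describes the port, transfer size, I2C address and EEPROM offset in a single parameter dword. A hedged packing sketch (hypothetical helper; the MFW-side handling is not shown):

/* Illustrative only: pack a transceiver-read parameter dword. */
static u32 pack_transceiver_read_param(u8 port, u8 size, u8 i2c_addr, u16 offset)
{
        return ((port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
                DRV_MB_PARAM_TRANSCEIVER_PORT_MASK) |
               ((size << DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
                DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK) |
               ((i2c_addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
                DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK) |
               (((u32)offset << DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
                DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK);
}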
+
+ /* Resource Allocation params - Driver version support */
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
+
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
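
These bits select which NVM cfg option a GET/SET_NVM_CFG_OPTION command operates on and how. The sketch below assumes ENTITY_SEL qualifies the ENTITY_ID field; both the helper and that interpretation are illustrative only:

/* Illustrative only: request one option by ID, optionally per-entity. */
static u32 pack_nvm_cfg_option_param(u16 option_id, u8 entity_id, bool init)
{
        u32 param = (u32)option_id << DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT;

        if (init)
                param |= DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK;

        /* Assumption: ENTITY_SEL means "apply to ENTITY_ID", not to all. */
        param |= DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK;
        param |= ((u32)entity_id << DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT) &
                 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK;

        return param;
}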
+
+/* DRV_MSG_CODE_GET_PERM_MAC parameters */
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00
+
+#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET)
+enum fw_msg_code_enum {
+ FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000),
+ FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001),
+ FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040),
+ FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011),
+ FW_MSG_CODE_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017),
+ FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002),
+ FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003),
+ FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080),
+ FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087),
+ FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010),
+ FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011),
+ FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031),
+ FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110),
+ FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011),
+ FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012),
+ FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013),
+ FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110),
+ FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400),
+ FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500),
+ FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00),
+ FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001),
+ FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a),
+ FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b),
+};
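
The response code occupies the upper 16 bits of fw_mb_header. As an illustration, a load request can be treated as granted when the MFW answers with one of the DRV_LOAD_* codes; a real driver would also distinguish the individual REFUSED codes. The helper is hypothetical:

/* Illustrative only: was the LOAD_REQ granted at some level? */
static bool load_req_was_granted(u32 fw_mb_header)
{
        u32 resp = fw_mb_header & FW_MSG_CODE_MASK;

        return resp == FW_MSG_CODE_DRV_LOAD_ENGINE ||
               resp == FW_MSG_CODE_DRV_LOAD_PORT ||
               resp == FW_MSG_CODE_DRV_LOAD_FUNCTION;
}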
+
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+/* Get PF RDMA protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+
+/* Get MFW feature support response */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2)
+#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4)
+#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17)
+#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19)
+
+#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
+
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
+ MFW_DRV_MSG_GENERIC_IDC,
+ MFW_DRV_MSG_XCVR_TX_FAULT,
+ MFW_DRV_MSG_XCVR_RX_LOS,
+ MFW_DRV_MSG_GET_FCOE_CAP,
+ MFW_DRV_MSG_GEN_LINK_DUMP,
+ MFW_DRV_MSG_GEN_IDLE_CHK,
+ MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
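
Each MFW_DRV_MSG_* id owns one byte in the msg[]/ack[] arrays, located by the DWORD/OFFSET/MASK macros above; a driver typically compares the msg byte against its acked copy to detect a new notification. An illustrative accessor (hypothetical name; the comparison policy itself is not shown):

/* Illustrative only: fetch the byte carrying one MFW_DRV_MSG_* notification. */
static u8 mfw_mb_get_msg_byte(const struct public_mfw_mb *mb,
                              enum MFW_DRV_MSG_TYPE msg_id)
{
        u32 dword = mb->msg[MFW_DRV_MSG_DWORD(msg_id)];

        return (dword & MFW_DRV_MSG_MASK(msg_id)) >> MFW_DRV_MSG_OFFSET(msg_id);
}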
+
+enum public_sections {
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+/* OCBB definitions */
+enum tlvs {
+ /* Category 1: Device Properties */
+ DRV_TLV_CLP_STR,
+ DRV_TLV_CLP_STR_CTD,
+ /* Category 6: Device Configuration */
+ DRV_TLV_SCSI_TO,
+ DRV_TLV_R_T_TOV,
+ DRV_TLV_R_A_TOV,
+ DRV_TLV_E_D_TOV,
+ DRV_TLV_CR_TOV,
+ DRV_TLV_BOOT_TYPE,
+ /* Category 8: Port Configuration */
+ DRV_TLV_NPIV_ENABLED,
+ /* Category 10: Function Configuration */
+ DRV_TLV_FEATURE_FLAGS,
+ DRV_TLV_LOCAL_ADMIN_ADDR,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+ DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+ DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+ DRV_TLV_PROMISCUOUS_MODE,
+ DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+ DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+ DRV_TLV_OS_DRIVER_STATES,
+ DRV_TLV_PXE_BOOT_PROGRESS,
+ /* Category 12: FC/FCoE Configuration */
+ DRV_TLV_NPIV_STATE,
+ DRV_TLV_NUM_OF_NPIV_IDS,
+ DRV_TLV_SWITCH_NAME,
+ DRV_TLV_SWITCH_PORT_NUM,
+ DRV_TLV_SWITCH_PORT_ID,
+ DRV_TLV_VENDOR_NAME,
+ DRV_TLV_SWITCH_MODEL,
+ DRV_TLV_SWITCH_FW_VER,
+ DRV_TLV_QOS_PRIORITY_PER_802_1P,
+ DRV_TLV_PORT_ALIAS,
+ DRV_TLV_PORT_STATE,
+ DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_LINK_FAILURE_COUNT,
+ DRV_TLV_FCOE_BOOT_PROGRESS,
+ /* Category 13: iSCSI Configuration */
+ DRV_TLV_TARGET_LLMNR_ENABLED,
+ DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+ DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+ DRV_TLV_AUTHENTICATION_METHOD,
+ DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+ DRV_TLV_MAX_FRAME_SIZE,
+ DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_ISCSI_BOOT_PROGRESS,
+ /* Category 20: Device Data */
+ DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+ DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+ DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+ DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+ DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+ DRV_TLV_NCSI_TX_BYTES_SENT,
+ /* Category 22: Base Port Data */
+ DRV_TLV_RX_DISCARDS,
+ DRV_TLV_RX_ERRORS,
+ DRV_TLV_TX_ERRORS,
+ DRV_TLV_TX_DISCARDS,
+ DRV_TLV_RX_FRAMES_RECEIVED,
+ DRV_TLV_TX_FRAMES_SENT,
+ /* Category 23: FC/FCoE Port Data */
+ DRV_TLV_RX_BROADCAST_PACKETS,
+ DRV_TLV_TX_BROADCAST_PACKETS,
+ /* Category 28: Base Function Data */
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+ DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_PF_RX_FRAMES_RECEIVED,
+ DRV_TLV_RX_BYTES_RECEIVED,
+ DRV_TLV_PF_TX_FRAMES_SENT,
+ DRV_TLV_TX_BYTES_SENT,
+ DRV_TLV_IOV_OFFLOAD,
+ DRV_TLV_PCI_ERRORS_CAP_ID,
+ DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+ DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+ DRV_TLV_CORRECTABLE_ERROR_STATUS,
+ DRV_TLV_CORRECTABLE_ERROR_MASK,
+ DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+ DRV_TLV_TX_QUEUES_EMPTY,
+ DRV_TLV_RX_QUEUES_EMPTY,
+ DRV_TLV_TX_QUEUES_FULL,
+ DRV_TLV_RX_QUEUES_FULL,
+ /* Category 29: FC/FCoE Function Data */
+ DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+ DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+ DRV_TLV_FCOE_TX_FRAMES_SENT,
+ DRV_TLV_FCOE_TX_BYTES_SENT,
+ DRV_TLV_CRC_ERROR_COUNT,
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+ DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+ DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+ DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+ DRV_TLV_DISPARITY_ERROR_COUNT,
+ DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_RJT,
+ DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+ DRV_TLV_FDISCS_SENT_COUNT,
+ DRV_TLV_FDISC_ACCS_RECEIVED,
+ DRV_TLV_FDISC_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_SENT_COUNT,
+ DRV_TLV_PLOGI_ACCS_RECEIVED,
+ DRV_TLV_PLOGI_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_1_TIMESTAMP,
+ DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_2_TIMESTAMP,
+ DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_3_TIMESTAMP,
+ DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_4_TIMESTAMP,
+ DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_5_TIMESTAMP,
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+ DRV_TLV_LOGOS_ISSUED,
+ DRV_TLV_LOGO_ACCS_RECEIVED,
+ DRV_TLV_LOGO_RJTS_RECEIVED,
+ DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_1_TIMESTAMP,
+ DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_2_TIMESTAMP,
+ DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_3_TIMESTAMP,
+ DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_4_TIMESTAMP,
+ DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_5_TIMESTAMP,
+ DRV_TLV_LOGOS_RECEIVED,
+ DRV_TLV_ACCS_ISSUED,
+ DRV_TLV_PRLIS_ISSUED,
+ DRV_TLV_ACCS_RECEIVED,
+ DRV_TLV_ABTS_SENT_COUNT,
+ DRV_TLV_ABTS_ACCS_RECEIVED,
+ DRV_TLV_ABTS_RJTS_RECEIVED,
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_1_TIMESTAMP,
+ DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_2_TIMESTAMP,
+ DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_3_TIMESTAMP,
+ DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_4_TIMESTAMP,
+ DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_5_TIMESTAMP,
+ DRV_TLV_RSCNS_RECEIVED,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+ DRV_TLV_LUN_RESETS_ISSUED,
+ DRV_TLV_ABORT_TASK_SETS_ISSUED,
+ DRV_TLV_TPRLOS_SENT,
+ DRV_TLV_NOS_SENT_COUNT,
+ DRV_TLV_NOS_RECEIVED_COUNT,
+ DRV_TLV_OLS_COUNT,
+ DRV_TLV_LR_COUNT,
+ DRV_TLV_LRR_COUNT,
+ DRV_TLV_LIP_SENT_COUNT,
+ DRV_TLV_LIP_RECEIVED_COUNT,
+ DRV_TLV_EOFA_COUNT,
+ DRV_TLV_EOFNI_COUNT,
+ DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+ DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+ DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+ /* Category 30: iSCSI Function Data */
+ DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+ DRV_TLV_ISCSI_PDU_TX_BYTES_SENT,
+ DRV_TLV_RDMA_DRV_VERSION
+};
+
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60
+#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2
+#define SFP_EEPROM_A2_VCC_ADDR 0x62
+#define SFP_EEPROM_A2_VCC_SIZE 2
+#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64
+#define SFP_EEPROM_A2_TX_BIAS_SIZE 2
+#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66
+#define SFP_EEPROM_A2_TX_POWER_SIZE 2
+#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68
+#define SFP_EEPROM_A2_RX_POWER_SIZE 2
+
+#define I2C_DEV_ADDR_A0 0xa0
+#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16
+#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2
+#define QSFP_EEPROM_A0_VCC_ADDR 0x1a
+#define QSFP_EEPROM_A0_VCC_SIZE 2
+#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a
+#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2
+#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32
+#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2
+#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22
+#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
+
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+
+ u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date;
+ u32 misc_sig;
+
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 nvm_cfg_version;
+ u32 nvm_cfg_new_option_seq;
+ u32 nvm_cfg_removed_option_seq;
+ u32 nvm_cfg_updated_value_seq;
+ u32 extended_serial_number[8];
+ u32 option_kit_pn[8];
+ u32 spare_pn[8];
+ u32 mps25_active_txfir_pre;
+ u32 mps25_active_txfir_main;
+ u32 mps25_active_txfir_post;
+ u32 features;
+ u32 tx_rx_eq_25g_hlpc;
+ u32 tx_rx_eq_25g_llpc;
+ u32 tx_rx_eq_25g_ac;
+ u32 tx_rx_eq_10g_pc;
+ u32 tx_rx_eq_10g_ac;
+ u32 tx_rx_eq_1g;
+ u32 tx_rx_eq_25g_bt;
+ u32 tx_rx_eq_10g_bt;
+ u32 generic_cont4;
+ u32 preboot_debug_mode_std;
+ u32 preboot_debug_mode_ext;
+ u32 ext_phy_cfg1;
+ u32 clocks;
+ u32 pre2_generic_cont_1;
+ u32 pre2_generic_cont_2;
+ u32 pre2_generic_cont_3;
+ u32 tx_rx_eq_50g_hlpc;
+ u32 tx_rx_eq_50g_mlpc;
+ u32 tx_rx_eq_50g_llpc;
+ u32 tx_rx_eq_50g_ac;
+ u32 trace_modules;
+ u32 pcie_class_code_fcoe;
+ u32 pcie_class_code_iscsi;
+ u32 no_provisioned_mac;
+ u32 lowest_mbi_version;
+ u32 generic_cont5;
+ u32 pre2_generic_cont_4;
+ u32 reserved[40];
+};
+
+struct nvm_cfg1_path {
+ u32 reserved[1];
+};
+
+struct nvm_cfg1_port {
+ u32 rel_to_opt123;
+ u32 rel_to_opt124;
+
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+
+ u32 pcie_cfg;
+ u32 features;
+
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+
+ u32 ext_phy;
+ /* EEE power saving mode */
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
+
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+
+ u32 board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+
+ u32 temperature;
+ u32 ext_phy_cfg1;
+
+ u32 extended_speed;
+#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
+#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
+
+ u32 extended_fec_mode;
+ u32 port_generic_cont_01;
+ u32 port_generic_cont_02;
+ u32 phy_temp_monitor;
+ u32 reserved[109];
+};
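The *_MASK/*_OFFSET pairs in this structure follow the usual packed-word convention: AND with the mask, then shift right by the offset. A minimal user-space sketch of decoding link_settings, with the constants copied from the definitions above and a purely hypothetical register value:

#include <stdio.h>

/* Field values and mask/offset pairs copied from the nvm_cfg1_port
 * link_settings definitions above.
 */
#define DRV_LINK_SPEED_MASK	0x0000000f
#define DRV_LINK_SPEED_OFFSET	0
#define DRV_FLOW_CONTROL_MASK	0x00000070
#define DRV_FLOW_CONTROL_OFFSET	4
#define FEC_FORCE_MODE_MASK	0x000e0000
#define FEC_FORCE_MODE_OFFSET	17

int main(void)
{
	/* Hypothetical link_settings word: forced 25G (0x4), RX + TX flow
	 * control (0x2 | 0x4) and FEC forced to RS (0x2).
	 */
	unsigned int link_settings = 0x4 |
				     ((0x2 | 0x4) << DRV_FLOW_CONTROL_OFFSET) |
				     (0x2 << FEC_FORCE_MODE_OFFSET);

	printf("speed=%u flow=0x%x fec=%u\n",
	       (link_settings & DRV_LINK_SPEED_MASK) >> DRV_LINK_SPEED_OFFSET,
	       (link_settings & DRV_FLOW_CONTROL_MASK) >> DRV_FLOW_CONTROL_OFFSET,
	       (link_settings & FEC_FORCE_MODE_MASK) >> FEC_FORCE_MODE_OFFSET);
	return 0;
}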
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 features;
+ u32 mf_mode_feature;
+ u32 reserved[6];
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
+};
+
+struct board_info {
+ u16 vendor_id;
+ u16 eth_did_suffix;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ char *board_name;
+ char *friendly_name;
+};
+
+struct trace_module_info {
+ char *module_name;
+};
+
+#define NUM_TRACE_MODULES 25
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET)))
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \
+ __builtin_offsetof(struct static_init, f))
+
+/* This section is located at a fixed place at the beginning of the
+ * scratchpad, to ensure that the MCP trace is not overwritten during an MFW
+ * upgrade. All the rest of the data has a floating location which differs
+ * from version to version and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware and is loaded
+ * with it from nvram in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ u32 tim_hash[8];
+#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash)))
+ u32 tpu_hash[8];
+#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash)))
+ u32 secure_pcie_fw_ver;
+#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver))))
+ u32 secure_running_mfw;
+#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw))))
+ struct mcp_trace trace;
+};
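TO_OFFSIZE() above packs a byte offset and a byte size into a single offsize_t word, storing both in dword units, and the SECTION() accessor later reads such words back. The real OFFSIZE_*_OFFSET shift constants are defined elsewhere in this header, so the following stand-alone sketch only assumes the common layout of offset in the low 16 bits and size in the high 16 bits:

#include <stdio.h>

/* Assumed offsize_t layout: offset (in dwords) in bits 15:0 and size
 * (in dwords) in bits 31:16. The real OFFSIZE_*_OFFSET constants are
 * defined elsewhere in this header, so these shifts are illustrative.
 */
#define OFFSIZE_OFFSET_SHIFT	0
#define OFFSIZE_SIZE_SHIFT	16

static unsigned int to_offsize(unsigned int byte_offset, unsigned int byte_size)
{
	/* Mirrors TO_OFFSIZE(): both fields are stored in dword units. */
	return ((byte_offset >> 2) << OFFSIZE_OFFSET_SHIFT) |
	       ((byte_size >> 2) << OFFSIZE_SIZE_SHIFT);
}

int main(void)
{
	/* A 256-byte scratchpad section that starts 0x40 bytes in. */
	unsigned int offsize = to_offsize(0x40, 256);

	printf("offsize=0x%08x offset_dwords=%u size_dwords=%u\n",
	       offsize,
	       (offsize >> OFFSIZE_OFFSET_SHIFT) & 0xffff,
	       (offsize >> OFFSIZE_SIZE_SHIFT) & 0xffff);
	return 0;
}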
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define _KB(x) ((x) * 1024)
+#define _MB(x) (_KB(x) * 1024)
+#define NVM_CRC_SIZE (sizeof(u32))
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len;
+ u32 code_start_addr;
+ u32 crc;
+};
+
+struct nvm_code_entry {
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+ u32 sram_start_addr;
+ u32 sram_run_addr;
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_NVM_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
+ NVM_TYPE_IDLE_CHK = 0x33,
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 100
+
+struct nvm_dir_meta {
+ u32 dir_id;
+ u32 nvm_dir_addr;
+ u32 num_images;
+ u32 next_mfw_to_run;
+};
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\
+ ({ \
+ _seq = (((_seq + 2) & \
+ NVM_DIR_SEQ_MASK) | \
+ (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\
+ })
+
+#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \
+ NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ ((_num_images) - 1) *\
+ sizeof(struct nvm_code_entry) +\
+ NVM_CRC_SIZE)
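NVM_DIR_SIZE() adds (num_images - 1) extra code entries to the one already embedded in struct nvm_dir, plus a trailing CRC word. A small sanity-check sketch with local copies of the structures defined above (20-byte entries and a 32-byte base on a typical build with no padding):

#include <stdio.h>
#include <stdint.h>

/* Local copies of the structures defined above. */
struct nvm_code_entry {
	uint32_t image_type;
	uint32_t nvm_start_addr;
	uint32_t len;
	uint32_t sram_start_addr;
	uint32_t sram_run_addr;
};

struct nvm_dir {
	int32_t seq;
	uint32_t num_images;
	uint32_t rsrv;
	struct nvm_code_entry code[1];	/* Up to MAX_NVM_DIR_ENTRIES */
};

#define NVM_CRC_SIZE (sizeof(uint32_t))
#define NVM_DIR_SIZE(n) (sizeof(struct nvm_dir) + \
			 ((n) - 1) * sizeof(struct nvm_code_entry) + \
			 NVM_CRC_SIZE)

int main(void)
{
	/* One entry is already part of struct nvm_dir, so only (n - 1)
	 * extra entries are added; a directory describing 3 images is
	 * 32 + 2 * 20 + 4 = 76 bytes here.
	 */
	printf("NVM_DIR_SIZE(3) = %zu\n", NVM_DIR_SIZE(3));
	return 0;
}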
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ u8 vpd_data[1];
+};
+
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE)
+#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200))
+
+#define FPGA_MIM_MAX_SIZE (0x40000)
+
+#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) \
+ - NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE)
+#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\
+ + (((idx) == NVM_TYPE_MIM2) ? \
+ GET_MIM_MAX_SIZE(is_asic, is_e4)\
+ : 0))
+#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \
+ GET_MIM_MAX_SIZE(is_asic,\
+ is_e4) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+struct nvm_image {
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+};
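Working through struct nvm_image with the constants above: the 20-byte bootstrap region, the 44 reserved bytes and LIM_MAX_SIZE together fill exactly two flash pages, and the two directory copies add two more, so the fixed part of the image is 16 KB. A small sketch that recomputes this (assuming no structure padding, which holds for these all-u32/u8 layouts):

#include <stdio.h>
#include <stdint.h>

#define FLASH_PAGE_SIZE 0x1000
#define NVM_RSV_SIZE    44

struct legacy_bootstrap_region {	/* copied from above: 5 x u32 = 20 bytes */
	uint32_t magic_value;
	uint32_t sram_start_addr;
	uint32_t code_len;
	uint32_t code_start_addr;
	uint32_t crc;
};

int main(void)
{
	size_t lim_max = 2 * FLASH_PAGE_SIZE -
			 sizeof(struct legacy_bootstrap_region) - NVM_RSV_SIZE;
	/* bootstrap + reserved + LIM fill exactly two flash pages, and the
	 * two directory copies add two more, so the fixed image header is
	 * 4 x 4 KB = 16 KB.
	 */
	size_t image_hdr = sizeof(struct legacy_bootstrap_region) +
			   NVM_RSV_SIZE + lim_max + 2 * FLASH_PAGE_SIZE;

	printf("LIM_MAX_SIZE = %zu bytes, nvm_image fixed part = %zu bytes\n",
	       lim_max, image_hdr);
	return 0;
}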
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->(f)))))
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+ struct hw_set_info hw_sets[1];
+};
+
+#define MAX_SUPPORTED_NVM_OPTIONS 1000
+
+#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff
+#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0
+#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000
+#define NVM_META_BIN_OPTION_LEN_OFFSET 16
+#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000
+#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24
+#define NVM_META_BIN_OPTION_ENTITY_GLOB 0
+#define NVM_META_BIN_OPTION_ENTITY_PORT 1
+#define NVM_META_BIN_OPTION_ENTITY_FUNC 2
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2
+
+struct nvm_meta_bin_t {
+ u32 magic;
+#define NVM_META_BIN_MAGIC 0x669955bb
+ u32 version;
+#define NVM_META_BIN_VERSION 1
+ u32 num_options;
+ u32 options[];
+};
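Each options[] word packs the option's offset, length, entity and configuration type; a short sketch decoding a hypothetical option word with the masks and shifts copied from the definitions above (the shortened names are local to the example):

#include <stdio.h>

/* Values copied from the NVM_META_BIN_OPTION_* definitions above. */
#define OPT_OFFSET_MASK		0x0000ffff
#define OPT_OFFSET_SHIFT	0
#define OPT_LEN_MASK		0x00ff0000
#define OPT_LEN_SHIFT		16
#define OPT_ENTITY_MASK		0x03000000
#define OPT_ENTITY_SHIFT	24
#define OPT_CFG_TYPE_MASK	0x0c000000
#define OPT_CFG_TYPE_SHIFT	26

int main(void)
{
	/* Hypothetical option word: offset 0x120, length 4, per-port
	 * entity (1), user config type (0).
	 */
	unsigned int opt = 0x120 | (4u << OPT_LEN_SHIFT) |
			   (1u << OPT_ENTITY_SHIFT);

	printf("offset=0x%x len=%u entity=%u cfg_type=%u\n",
	       (opt & OPT_OFFSET_MASK) >> OPT_OFFSET_SHIFT,
	       (opt & OPT_LEN_MASK) >> OPT_LEN_SHIFT,
	       (opt & OPT_ENTITY_MASK) >> OPT_ENTITY_SHIFT,
	       (opt & OPT_CFG_TYPE_MASK) >> OPT_CFG_TYPE_SHIFT);
	return 0;
}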
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
new file mode 100644
index 000000000..f55eed092
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -0,0 +1,1340 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define TLV_TYPE(p) (p[0])
+#define TLV_LENGTH(p) (p[1])
+#define TLV_FLAGS(p) (p[3])
+
+#define QED_TLV_DATA_MAX (14)
+struct qed_tlv_parsed_buf {
+ /* To be filled with the address to set in Value field */
+ void *p_val;
+
+ /* To be used internally in case the value has to be modified */
+ u8 data[QED_TLV_DATA_MAX];
+};
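The TLV_TYPE/TLV_LENGTH/TLV_FLAGS accessors treat each TLV header as a small byte array: type in byte 0, value length in dwords in byte 1, flags in byte 3, with the value immediately following. A minimal stand-alone sketch of walking such a stream, assuming a 4-byte header (matching the byte indices above) and a made-up two-TLV buffer:

#include <stdio.h>
#include <stdint.h>

#define TLV_TYPE(p)   ((p)[0])
#define TLV_LENGTH(p) ((p)[1])	/* value length, in dwords */
#define TLV_FLAGS(p)  ((p)[3])

int main(void)
{
	/* Hypothetical stream: TLV type 2 with a one-dword value, then
	 * TLV type 5 with a two-dword value. Header assumed 4 bytes.
	 */
	uint8_t buf[] = {
		2, 1, 0, 0,  0x11, 0x22, 0x33, 0x44,
		5, 2, 0, 0,  0xaa, 0xbb, 0xcc, 0xdd, 0x01, 0x02, 0x03, 0x04,
	};
	unsigned int offset = 0;

	while (offset < sizeof(buf)) {
		uint8_t *p = &buf[offset];

		printf("type=%u len=%u dwords flags=0x%x\n",
		       (unsigned int)TLV_TYPE(p), (unsigned int)TLV_LENGTH(p),
		       (unsigned int)TLV_FLAGS(p));
		/* Same stride as the parsing loops below: header + value. */
		offset += 4 + 4 * TLV_LENGTH(p);
	}
	return 0;
}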
+
+static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+ switch (tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ case DRV_TLV_OS_DRIVER_STATES:
+ case DRV_TLV_PXE_BOOT_PROGRESS:
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ case DRV_TLV_TX_FRAMES_SENT:
+ case DRV_TLV_TX_BYTES_SENT:
+ case DRV_TLV_NPIV_ENABLED:
+ case DRV_TLV_PCIE_BUS_RX_UTILIZATION:
+ case DRV_TLV_PCIE_BUS_TX_UTILIZATION:
+ case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION:
+ case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED:
+ case DRV_TLV_NCSI_RX_BYTES_RECEIVED:
+ case DRV_TLV_NCSI_TX_BYTES_SENT:
+ *tlv_group |= QED_MFW_TLV_GENERIC;
+ break;
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ case DRV_TLV_PROMISCUOUS_MODE:
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_IOV_OFFLOAD:
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ case DRV_TLV_TX_QUEUES_FULL:
+ case DRV_TLV_RX_QUEUES_FULL:
+ *tlv_group |= QED_MFW_TLV_ETH;
+ break;
+ case DRV_TLV_SCSI_TO:
+ case DRV_TLV_R_T_TOV:
+ case DRV_TLV_R_A_TOV:
+ case DRV_TLV_E_D_TOV:
+ case DRV_TLV_CR_TOV:
+ case DRV_TLV_BOOT_TYPE:
+ case DRV_TLV_NPIV_STATE:
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ case DRV_TLV_SWITCH_NAME:
+ case DRV_TLV_SWITCH_PORT_NUM:
+ case DRV_TLV_SWITCH_PORT_ID:
+ case DRV_TLV_VENDOR_NAME:
+ case DRV_TLV_SWITCH_MODEL:
+ case DRV_TLV_SWITCH_FW_VER:
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ case DRV_TLV_PORT_ALIAS:
+ case DRV_TLV_PORT_STATE:
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ case DRV_TLV_CRC_ERROR_COUNT:
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ case DRV_TLV_LAST_FLOGI_RJT:
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ case DRV_TLV_LOGOS_ISSUED:
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ case DRV_TLV_LOGOS_RECEIVED:
+ case DRV_TLV_ACCS_ISSUED:
+ case DRV_TLV_PRLIS_ISSUED:
+ case DRV_TLV_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_SENT_COUNT:
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ case DRV_TLV_RSCNS_RECEIVED:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ case DRV_TLV_TPRLOS_SENT:
+ case DRV_TLV_NOS_SENT_COUNT:
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ case DRV_TLV_OLS_COUNT:
+ case DRV_TLV_LR_COUNT:
+ case DRV_TLV_LRR_COUNT:
+ case DRV_TLV_LIP_SENT_COUNT:
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ case DRV_TLV_EOFA_COUNT:
+ case DRV_TLV_EOFNI_COUNT:
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ *tlv_group = QED_MFW_TLV_FCOE;
+ break;
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ case DRV_TLV_MAX_FRAME_SIZE:
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ *tlv_group |= QED_MFW_TLV_ISCSI;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Returns the size of the data buffer, or -1 when the TLV data is not available. */
+static int
+qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_generic *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_FEATURE_FLAGS:
+ if (p_drv_buf->flags.b_set) {
+ memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+ p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ?
+ 1 : 0;
+ p_buf->data[0] |= (p_drv_buf->flags.lso_supported ?
+ 1 : 0) << 1;
+ p_buf->p_val = p_buf->data;
+ return QED_MFW_TLV_FLAGS_SIZE;
+ }
+ break;
+
+ case DRV_TLV_LOCAL_ADMIN_ADDR:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+ case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+ {
+ int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR;
+
+ if (p_drv_buf->mac_set[idx]) {
+ p_buf->p_val = p_drv_buf->mac[idx];
+ return ETH_ALEN;
+ }
+ break;
+ }
+
+ case DRV_TLV_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_eth *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+ if (p_drv_buf->lso_maxoff_size_set) {
+ p_buf->p_val = &p_drv_buf->lso_maxoff_size;
+ return sizeof(p_drv_buf->lso_maxoff_size);
+ }
+ break;
+ case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+ if (p_drv_buf->lso_minseg_size_set) {
+ p_buf->p_val = &p_drv_buf->lso_minseg_size;
+ return sizeof(p_drv_buf->lso_minseg_size);
+ }
+ break;
+ case DRV_TLV_PROMISCUOUS_MODE:
+ if (p_drv_buf->prom_mode_set) {
+ p_buf->p_val = &p_drv_buf->prom_mode;
+ return sizeof(p_drv_buf->prom_mode);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->tx_descr_size;
+ return sizeof(p_drv_buf->tx_descr_size);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->rx_descr_size;
+ return sizeof(p_drv_buf->rx_descr_size);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+ if (p_drv_buf->netq_count_set) {
+ p_buf->p_val = &p_drv_buf->netq_count;
+ return sizeof(p_drv_buf->netq_count);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+ if (p_drv_buf->tcp4_offloads_set) {
+ p_buf->p_val = &p_drv_buf->tcp4_offloads;
+ return sizeof(p_drv_buf->tcp4_offloads);
+ }
+ break;
+ case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+ if (p_drv_buf->tcp6_offloads_set) {
+ p_buf->p_val = &p_drv_buf->tcp6_offloads;
+ return sizeof(p_drv_buf->tcp6_offloads);
+ }
+ break;
+ case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_descr_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->tx_descr_qdepth;
+ return sizeof(p_drv_buf->tx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_descr_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->rx_descr_qdepth;
+ return sizeof(p_drv_buf->rx_descr_qdepth);
+ }
+ break;
+ case DRV_TLV_IOV_OFFLOAD:
+ if (p_drv_buf->iov_offload_set) {
+ p_buf->p_val = &p_drv_buf->iov_offload;
+ return sizeof(p_drv_buf->iov_offload);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_EMPTY:
+ if (p_drv_buf->txqs_empty_set) {
+ p_buf->p_val = &p_drv_buf->txqs_empty;
+ return sizeof(p_drv_buf->txqs_empty);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_EMPTY:
+ if (p_drv_buf->rxqs_empty_set) {
+ p_buf->p_val = &p_drv_buf->rxqs_empty;
+ return sizeof(p_drv_buf->rxqs_empty);
+ }
+ break;
+ case DRV_TLV_TX_QUEUES_FULL:
+ if (p_drv_buf->num_txqs_full_set) {
+ p_buf->p_val = &p_drv_buf->num_txqs_full;
+ return sizeof(p_drv_buf->num_txqs_full);
+ }
+ break;
+ case DRV_TLV_RX_QUEUES_FULL:
+ if (p_drv_buf->num_rxqs_full_set) {
+ p_buf->p_val = &p_drv_buf->num_rxqs_full;
+ return sizeof(p_drv_buf->num_rxqs_full);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ if (!p_time->b_set)
+ return -1;
+
+ /* Validate numbers */
+ if (p_time->month > 12)
+ p_time->month = 0;
+ if (p_time->day > 31)
+ p_time->day = 0;
+ if (p_time->hour > 23)
+ p_time->hour = 0;
+ if (p_time->min > 59)
+ p_time->min = 0;
+ if (p_time->msec > 999)
+ p_time->msec = 0;
+ if (p_time->usec > 999)
+ p_time->usec = 0;
+
+ memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+ snprintf(p_buf->data, 14, "%d%d%d%d%d%d",
+ p_time->month, p_time->day,
+ p_time->hour, p_time->min, p_time->msec, p_time->usec);
+
+ p_buf->p_val = p_buf->data;
+
+ return QED_MFW_TLV_TIME_SIZE;
+}
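The timestamp is emitted with a plain "%d%d%d%d%d%d" format, so the sanitized fields are simply concatenated with no zero padding or separators. A tiny user-space illustration of the resulting string (QED_TLV_DATA_MAX copied from above; the real return value, QED_MFW_TLV_TIME_SIZE, is defined elsewhere):

#include <stdio.h>

#define QED_TLV_DATA_MAX 14

int main(void)
{
	char data[QED_TLV_DATA_MAX] = { 0 };
	/* Hypothetical timestamp: March 7th, 09:05, 42 msec, 7 usec. */
	int month = 3, day = 7, hour = 9, min = 5, msec = 42, usec = 7;

	/* Same formatting as the driver: the fields are concatenated. */
	snprintf(data, sizeof(data), "%d%d%d%d%d%d",
		 month, day, hour, min, msec, usec);
	printf("\"%s\"\n", data);	/* prints "3795427" */
	return 0;
}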
+
+static int
+qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_fcoe *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ struct qed_mfw_tlv_time *p_time;
+ u8 idx;
+
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_SCSI_TO:
+ if (p_drv_buf->scsi_timeout_set) {
+ p_buf->p_val = &p_drv_buf->scsi_timeout;
+ return sizeof(p_drv_buf->scsi_timeout);
+ }
+ break;
+ case DRV_TLV_R_T_TOV:
+ if (p_drv_buf->rt_tov_set) {
+ p_buf->p_val = &p_drv_buf->rt_tov;
+ return sizeof(p_drv_buf->rt_tov);
+ }
+ break;
+ case DRV_TLV_R_A_TOV:
+ if (p_drv_buf->ra_tov_set) {
+ p_buf->p_val = &p_drv_buf->ra_tov;
+ return sizeof(p_drv_buf->ra_tov);
+ }
+ break;
+ case DRV_TLV_E_D_TOV:
+ if (p_drv_buf->ed_tov_set) {
+ p_buf->p_val = &p_drv_buf->ed_tov;
+ return sizeof(p_drv_buf->ed_tov);
+ }
+ break;
+ case DRV_TLV_CR_TOV:
+ if (p_drv_buf->cr_tov_set) {
+ p_buf->p_val = &p_drv_buf->cr_tov;
+ return sizeof(p_drv_buf->cr_tov);
+ }
+ break;
+ case DRV_TLV_BOOT_TYPE:
+ if (p_drv_buf->boot_type_set) {
+ p_buf->p_val = &p_drv_buf->boot_type;
+ return sizeof(p_drv_buf->boot_type);
+ }
+ break;
+ case DRV_TLV_NPIV_STATE:
+ if (p_drv_buf->npiv_state_set) {
+ p_buf->p_val = &p_drv_buf->npiv_state;
+ return sizeof(p_drv_buf->npiv_state);
+ }
+ break;
+ case DRV_TLV_NUM_OF_NPIV_IDS:
+ if (p_drv_buf->num_npiv_ids_set) {
+ p_buf->p_val = &p_drv_buf->num_npiv_ids;
+ return sizeof(p_drv_buf->num_npiv_ids);
+ }
+ break;
+ case DRV_TLV_SWITCH_NAME:
+ if (p_drv_buf->switch_name_set) {
+ p_buf->p_val = &p_drv_buf->switch_name;
+ return sizeof(p_drv_buf->switch_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_NUM:
+ if (p_drv_buf->switch_portnum_set) {
+ p_buf->p_val = &p_drv_buf->switch_portnum;
+ return sizeof(p_drv_buf->switch_portnum);
+ }
+ break;
+ case DRV_TLV_SWITCH_PORT_ID:
+ if (p_drv_buf->switch_portid_set) {
+ p_buf->p_val = &p_drv_buf->switch_portid;
+ return sizeof(p_drv_buf->switch_portid);
+ }
+ break;
+ case DRV_TLV_VENDOR_NAME:
+ if (p_drv_buf->vendor_name_set) {
+ p_buf->p_val = &p_drv_buf->vendor_name;
+ return sizeof(p_drv_buf->vendor_name);
+ }
+ break;
+ case DRV_TLV_SWITCH_MODEL:
+ if (p_drv_buf->switch_model_set) {
+ p_buf->p_val = &p_drv_buf->switch_model;
+ return sizeof(p_drv_buf->switch_model);
+ }
+ break;
+ case DRV_TLV_SWITCH_FW_VER:
+ if (p_drv_buf->switch_fw_version_set) {
+ p_buf->p_val = &p_drv_buf->switch_fw_version;
+ return sizeof(p_drv_buf->switch_fw_version);
+ }
+ break;
+ case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+ if (p_drv_buf->qos_pri_set) {
+ p_buf->p_val = &p_drv_buf->qos_pri;
+ return sizeof(p_drv_buf->qos_pri);
+ }
+ break;
+ case DRV_TLV_PORT_ALIAS:
+ if (p_drv_buf->port_alias_set) {
+ p_buf->p_val = &p_drv_buf->port_alias;
+ return sizeof(p_drv_buf->port_alias);
+ }
+ break;
+ case DRV_TLV_PORT_STATE:
+ if (p_drv_buf->port_state_set) {
+ p_buf->p_val = &p_drv_buf->port_state;
+ return sizeof(p_drv_buf->port_state);
+ }
+ break;
+ case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_tx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->fip_tx_descr_size;
+ return sizeof(p_drv_buf->fip_tx_descr_size);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->fip_rx_descr_size_set) {
+ p_buf->p_val = &p_drv_buf->fip_rx_descr_size;
+ return sizeof(p_drv_buf->fip_rx_descr_size);
+ }
+ break;
+ case DRV_TLV_LINK_FAILURE_COUNT:
+ if (p_drv_buf->link_failures_set) {
+ p_buf->p_val = &p_drv_buf->link_failures;
+ return sizeof(p_drv_buf->link_failures);
+ }
+ break;
+ case DRV_TLV_FCOE_BOOT_PROGRESS:
+ if (p_drv_buf->fcoe_boot_progress_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_boot_progress;
+ return sizeof(p_drv_buf->fcoe_boot_progress);
+ }
+ break;
+ case DRV_TLV_RX_BROADCAST_PACKETS:
+ if (p_drv_buf->rx_bcast_set) {
+ p_buf->p_val = &p_drv_buf->rx_bcast;
+ return sizeof(p_drv_buf->rx_bcast);
+ }
+ break;
+ case DRV_TLV_TX_BROADCAST_PACKETS:
+ if (p_drv_buf->tx_bcast_set) {
+ p_buf->p_val = &p_drv_buf->tx_bcast;
+ return sizeof(p_drv_buf->tx_bcast);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_txq_depth_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_txq_depth;
+ return sizeof(p_drv_buf->fcoe_txq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->fcoe_rxq_depth_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rxq_depth;
+ return sizeof(p_drv_buf->fcoe_rxq_depth);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rx_frames;
+ return sizeof(p_drv_buf->fcoe_rx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+ if (p_drv_buf->fcoe_rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_rx_bytes;
+ return sizeof(p_drv_buf->fcoe_rx_bytes);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_FRAMES_SENT:
+ if (p_drv_buf->fcoe_tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_tx_frames;
+ return sizeof(p_drv_buf->fcoe_tx_frames);
+ }
+ break;
+ case DRV_TLV_FCOE_TX_BYTES_SENT:
+ if (p_drv_buf->fcoe_tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->fcoe_tx_bytes;
+ return sizeof(p_drv_buf->fcoe_tx_bytes);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_COUNT:
+ if (p_drv_buf->crc_count_set) {
+ p_buf->p_val = &p_drv_buf->crc_count;
+ return sizeof(p_drv_buf->crc_count);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2;
+
+ if (p_drv_buf->crc_err_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx];
+ return sizeof(p_drv_buf->crc_err_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+ case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx],
+ p_buf);
+ case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+ if (p_drv_buf->losync_err_set) {
+ p_buf->p_val = &p_drv_buf->losync_err;
+ return sizeof(p_drv_buf->losync_err);
+ }
+ break;
+ case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+ if (p_drv_buf->losig_err_set) {
+ p_buf->p_val = &p_drv_buf->losig_err;
+ return sizeof(p_drv_buf->losig_err);
+ }
+ break;
+ case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+ if (p_drv_buf->primtive_err_set) {
+ p_buf->p_val = &p_drv_buf->primtive_err;
+ return sizeof(p_drv_buf->primtive_err);
+ }
+ break;
+ case DRV_TLV_DISPARITY_ERROR_COUNT:
+ if (p_drv_buf->disparity_err_set) {
+ p_buf->p_val = &p_drv_buf->disparity_err;
+ return sizeof(p_drv_buf->disparity_err);
+ }
+ break;
+ case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+ if (p_drv_buf->code_violation_err_set) {
+ p_buf->p_val = &p_drv_buf->code_violation_err;
+ return sizeof(p_drv_buf->code_violation_err);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+ idx = p_tlv->tlv_type -
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1;
+ if (p_drv_buf->flogi_param_set[idx]) {
+ p_buf->p_val = &p_drv_buf->flogi_param[idx];
+ return sizeof(p_drv_buf->flogi_param[idx]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp,
+ p_buf);
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+ case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+ idx = p_tlv->tlv_type -
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1;
+
+ if (p_drv_buf->flogi_acc_param_set[idx]) {
+ p_buf->p_val = &p_drv_buf->flogi_acc_param[idx];
+ return sizeof(p_drv_buf->flogi_acc_param[idx]);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp,
+ p_buf);
+ case DRV_TLV_LAST_FLOGI_RJT:
+ if (p_drv_buf->flogi_rjt_set) {
+ p_buf->p_val = &p_drv_buf->flogi_rjt;
+ return sizeof(p_drv_buf->flogi_rjt);
+ }
+ break;
+ case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp,
+ p_buf);
+ case DRV_TLV_FDISCS_SENT_COUNT:
+ if (p_drv_buf->fdiscs_set) {
+ p_buf->p_val = &p_drv_buf->fdiscs;
+ return sizeof(p_drv_buf->fdiscs);
+ }
+ break;
+ case DRV_TLV_FDISC_ACCS_RECEIVED:
+ if (p_drv_buf->fdisc_acc_set) {
+ p_buf->p_val = &p_drv_buf->fdisc_acc;
+ return sizeof(p_drv_buf->fdisc_acc);
+ }
+ break;
+ case DRV_TLV_FDISC_RJTS_RECEIVED:
+ if (p_drv_buf->fdisc_rjt_set) {
+ p_buf->p_val = &p_drv_buf->fdisc_rjt;
+ return sizeof(p_drv_buf->fdisc_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_SENT_COUNT:
+ if (p_drv_buf->plogi_set) {
+ p_buf->p_val = &p_drv_buf->plogi;
+ return sizeof(p_drv_buf->plogi);
+ }
+ break;
+ case DRV_TLV_PLOGI_ACCS_RECEIVED:
+ if (p_drv_buf->plogi_acc_set) {
+ p_buf->p_val = &p_drv_buf->plogi_acc;
+ return sizeof(p_drv_buf->plogi_acc);
+ }
+ break;
+ case DRV_TLV_PLOGI_RJTS_RECEIVED:
+ if (p_drv_buf->plogi_rjt_set) {
+ p_buf->p_val = &p_drv_buf->plogi_rjt;
+ return sizeof(p_drv_buf->plogi_rjt);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2;
+
+ if (p_drv_buf->plogi_dst_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx];
+ return sizeof(p_drv_buf->plogi_dst_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx],
+ p_buf);
+ case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2;
+
+ if (p_drv_buf->plogi_acc_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx];
+ return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+ case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2;
+ p_time = &p_drv_buf->plogi_acc_tstamp[idx];
+
+ return qed_mfw_get_tlv_time_value(p_time, p_buf);
+ case DRV_TLV_LOGOS_ISSUED:
+ if (p_drv_buf->tx_plogos_set) {
+ p_buf->p_val = &p_drv_buf->tx_plogos;
+ return sizeof(p_drv_buf->tx_plogos);
+ }
+ break;
+ case DRV_TLV_LOGO_ACCS_RECEIVED:
+ if (p_drv_buf->plogo_acc_set) {
+ p_buf->p_val = &p_drv_buf->plogo_acc;
+ return sizeof(p_drv_buf->plogo_acc);
+ }
+ break;
+ case DRV_TLV_LOGO_RJTS_RECEIVED:
+ if (p_drv_buf->plogo_rjt_set) {
+ p_buf->p_val = &p_drv_buf->plogo_rjt;
+ return sizeof(p_drv_buf->plogo_rjt);
+ }
+ break;
+ case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+ case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+ idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) /
+ 2;
+
+ if (p_drv_buf->plogo_src_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx];
+ return sizeof(p_drv_buf->plogo_src_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_LOGO_1_TIMESTAMP:
+ case DRV_TLV_LOGO_2_TIMESTAMP:
+ case DRV_TLV_LOGO_3_TIMESTAMP:
+ case DRV_TLV_LOGO_4_TIMESTAMP:
+ case DRV_TLV_LOGO_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx],
+ p_buf);
+ case DRV_TLV_LOGOS_RECEIVED:
+ if (p_drv_buf->rx_logos_set) {
+ p_buf->p_val = &p_drv_buf->rx_logos;
+ return sizeof(p_drv_buf->rx_logos);
+ }
+ break;
+ case DRV_TLV_ACCS_ISSUED:
+ if (p_drv_buf->tx_accs_set) {
+ p_buf->p_val = &p_drv_buf->tx_accs;
+ return sizeof(p_drv_buf->tx_accs);
+ }
+ break;
+ case DRV_TLV_PRLIS_ISSUED:
+ if (p_drv_buf->tx_prlis_set) {
+ p_buf->p_val = &p_drv_buf->tx_prlis;
+ return sizeof(p_drv_buf->tx_prlis);
+ }
+ break;
+ case DRV_TLV_ACCS_RECEIVED:
+ if (p_drv_buf->rx_accs_set) {
+ p_buf->p_val = &p_drv_buf->rx_accs;
+ return sizeof(p_drv_buf->rx_accs);
+ }
+ break;
+ case DRV_TLV_ABTS_SENT_COUNT:
+ if (p_drv_buf->tx_abts_set) {
+ p_buf->p_val = &p_drv_buf->tx_abts;
+ return sizeof(p_drv_buf->tx_abts);
+ }
+ break;
+ case DRV_TLV_ABTS_ACCS_RECEIVED:
+ if (p_drv_buf->rx_abts_acc_set) {
+ p_buf->p_val = &p_drv_buf->rx_abts_acc;
+ return sizeof(p_drv_buf->rx_abts_acc);
+ }
+ break;
+ case DRV_TLV_ABTS_RJTS_RECEIVED:
+ if (p_drv_buf->rx_abts_rjt_set) {
+ p_buf->p_val = &p_drv_buf->rx_abts_rjt;
+ return sizeof(p_drv_buf->rx_abts_rjt);
+ }
+ break;
+ case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+ case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2;
+
+ if (p_drv_buf->abts_dst_fcid_set[idx]) {
+ p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx];
+ return sizeof(p_drv_buf->abts_dst_fcid[idx]);
+ }
+ break;
+ case DRV_TLV_ABTS_1_TIMESTAMP:
+ case DRV_TLV_ABTS_2_TIMESTAMP:
+ case DRV_TLV_ABTS_3_TIMESTAMP:
+ case DRV_TLV_ABTS_4_TIMESTAMP:
+ case DRV_TLV_ABTS_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2;
+
+ return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx],
+ p_buf);
+ case DRV_TLV_RSCNS_RECEIVED:
+ if (p_drv_buf->rx_rscn_set) {
+ p_buf->p_val = &p_drv_buf->rx_rscn;
+ return sizeof(p_drv_buf->rx_rscn);
+ }
+ break;
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+ case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+ idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1;
+
+ if (p_drv_buf->rx_rscn_nport_set[idx]) {
+ p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx];
+ return sizeof(p_drv_buf->rx_rscn_nport[idx]);
+ }
+ break;
+ case DRV_TLV_LUN_RESETS_ISSUED:
+ if (p_drv_buf->tx_lun_rst_set) {
+ p_buf->p_val = &p_drv_buf->tx_lun_rst;
+ return sizeof(p_drv_buf->tx_lun_rst);
+ }
+ break;
+ case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+ if (p_drv_buf->abort_task_sets_set) {
+ p_buf->p_val = &p_drv_buf->abort_task_sets;
+ return sizeof(p_drv_buf->abort_task_sets);
+ }
+ break;
+ case DRV_TLV_TPRLOS_SENT:
+ if (p_drv_buf->tx_tprlos_set) {
+ p_buf->p_val = &p_drv_buf->tx_tprlos;
+ return sizeof(p_drv_buf->tx_tprlos);
+ }
+ break;
+ case DRV_TLV_NOS_SENT_COUNT:
+ if (p_drv_buf->tx_nos_set) {
+ p_buf->p_val = &p_drv_buf->tx_nos;
+ return sizeof(p_drv_buf->tx_nos);
+ }
+ break;
+ case DRV_TLV_NOS_RECEIVED_COUNT:
+ if (p_drv_buf->rx_nos_set) {
+ p_buf->p_val = &p_drv_buf->rx_nos;
+ return sizeof(p_drv_buf->rx_nos);
+ }
+ break;
+ case DRV_TLV_OLS_COUNT:
+ if (p_drv_buf->ols_set) {
+ p_buf->p_val = &p_drv_buf->ols;
+ return sizeof(p_drv_buf->ols);
+ }
+ break;
+ case DRV_TLV_LR_COUNT:
+ if (p_drv_buf->lr_set) {
+ p_buf->p_val = &p_drv_buf->lr;
+ return sizeof(p_drv_buf->lr);
+ }
+ break;
+ case DRV_TLV_LRR_COUNT:
+ if (p_drv_buf->lrr_set) {
+ p_buf->p_val = &p_drv_buf->lrr;
+ return sizeof(p_drv_buf->lrr);
+ }
+ break;
+ case DRV_TLV_LIP_SENT_COUNT:
+ if (p_drv_buf->tx_lip_set) {
+ p_buf->p_val = &p_drv_buf->tx_lip;
+ return sizeof(p_drv_buf->tx_lip);
+ }
+ break;
+ case DRV_TLV_LIP_RECEIVED_COUNT:
+ if (p_drv_buf->rx_lip_set) {
+ p_buf->p_val = &p_drv_buf->rx_lip;
+ return sizeof(p_drv_buf->rx_lip);
+ }
+ break;
+ case DRV_TLV_EOFA_COUNT:
+ if (p_drv_buf->eofa_set) {
+ p_buf->p_val = &p_drv_buf->eofa;
+ return sizeof(p_drv_buf->eofa);
+ }
+ break;
+ case DRV_TLV_EOFNI_COUNT:
+ if (p_drv_buf->eofni_set) {
+ p_buf->p_val = &p_drv_buf->eofni;
+ return sizeof(p_drv_buf->eofni);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+ if (p_drv_buf->scsi_chks_set) {
+ p_buf->p_val = &p_drv_buf->scsi_chks;
+ return sizeof(p_drv_buf->scsi_chks);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_cond_met_set) {
+ p_buf->p_val = &p_drv_buf->scsi_cond_met;
+ return sizeof(p_drv_buf->scsi_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+ if (p_drv_buf->scsi_busy_set) {
+ p_buf->p_val = &p_drv_buf->scsi_busy;
+ return sizeof(p_drv_buf->scsi_busy);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+ if (p_drv_buf->scsi_inter_set) {
+ p_buf->p_val = &p_drv_buf->scsi_inter;
+ return sizeof(p_drv_buf->scsi_inter);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+ if (p_drv_buf->scsi_inter_cond_met_set) {
+ p_buf->p_val = &p_drv_buf->scsi_inter_cond_met;
+ return sizeof(p_drv_buf->scsi_inter_cond_met);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+ if (p_drv_buf->scsi_rsv_conflicts_set) {
+ p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts;
+ return sizeof(p_drv_buf->scsi_rsv_conflicts);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+ if (p_drv_buf->scsi_tsk_full_set) {
+ p_buf->p_val = &p_drv_buf->scsi_tsk_full;
+ return sizeof(p_drv_buf->scsi_tsk_full);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+ if (p_drv_buf->scsi_aca_active_set) {
+ p_buf->p_val = &p_drv_buf->scsi_aca_active;
+ return sizeof(p_drv_buf->scsi_aca_active);
+ }
+ break;
+ case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+ if (p_drv_buf->scsi_tsk_abort_set) {
+ p_buf->p_val = &p_drv_buf->scsi_tsk_abort;
+ return sizeof(p_drv_buf->scsi_tsk_abort);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+ case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+ idx = (p_tlv->tlv_type -
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2;
+
+ if (p_drv_buf->scsi_rx_chk_set[idx]) {
+ p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx];
+ return sizeof(p_drv_buf->scsi_rx_chk[idx]);
+ }
+ break;
+ case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+ case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+ idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2;
+ p_time = &p_drv_buf->scsi_chk_tstamp[idx];
+
+ return qed_mfw_get_tlv_time_value(p_time, p_buf);
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int
+qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+ struct qed_mfw_tlv_iscsi *p_drv_buf,
+ struct qed_tlv_parsed_buf *p_buf)
+{
+ switch (p_tlv->tlv_type) {
+ case DRV_TLV_TARGET_LLMNR_ENABLED:
+ if (p_drv_buf->target_llmnr_set) {
+ p_buf->p_val = &p_drv_buf->target_llmnr;
+ return sizeof(p_drv_buf->target_llmnr);
+ }
+ break;
+ case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->header_digest_set) {
+ p_buf->p_val = &p_drv_buf->header_digest;
+ return sizeof(p_drv_buf->header_digest);
+ }
+ break;
+ case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+ if (p_drv_buf->data_digest_set) {
+ p_buf->p_val = &p_drv_buf->data_digest;
+ return sizeof(p_drv_buf->data_digest);
+ }
+ break;
+ case DRV_TLV_AUTHENTICATION_METHOD:
+ if (p_drv_buf->auth_method_set) {
+ p_buf->p_val = &p_drv_buf->auth_method;
+ return sizeof(p_drv_buf->auth_method);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+ if (p_drv_buf->boot_taget_portal_set) {
+ p_buf->p_val = &p_drv_buf->boot_taget_portal;
+ return sizeof(p_drv_buf->boot_taget_portal);
+ }
+ break;
+ case DRV_TLV_MAX_FRAME_SIZE:
+ if (p_drv_buf->frame_size_set) {
+ p_buf->p_val = &p_drv_buf->frame_size;
+ return sizeof(p_drv_buf->frame_size);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->tx_desc_size_set) {
+ p_buf->p_val = &p_drv_buf->tx_desc_size;
+ return sizeof(p_drv_buf->tx_desc_size);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+ if (p_drv_buf->rx_desc_size_set) {
+ p_buf->p_val = &p_drv_buf->rx_desc_size;
+ return sizeof(p_drv_buf->rx_desc_size);
+ }
+ break;
+ case DRV_TLV_ISCSI_BOOT_PROGRESS:
+ if (p_drv_buf->boot_progress_set) {
+ p_buf->p_val = &p_drv_buf->boot_progress;
+ return sizeof(p_drv_buf->boot_progress);
+ }
+ break;
+ case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->tx_desc_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->tx_desc_qdepth;
+ return sizeof(p_drv_buf->tx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+ if (p_drv_buf->rx_desc_qdepth_set) {
+ p_buf->p_val = &p_drv_buf->rx_desc_qdepth;
+ return sizeof(p_drv_buf->rx_desc_qdepth);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+ if (p_drv_buf->rx_frames_set) {
+ p_buf->p_val = &p_drv_buf->rx_frames;
+ return sizeof(p_drv_buf->rx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+ if (p_drv_buf->rx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->rx_bytes;
+ return sizeof(p_drv_buf->rx_bytes);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+ if (p_drv_buf->tx_frames_set) {
+ p_buf->p_val = &p_drv_buf->tx_frames;
+ return sizeof(p_drv_buf->tx_frames);
+ }
+ break;
+ case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+ if (p_drv_buf->tx_bytes_set) {
+ p_buf->p_val = &p_drv_buf->tx_bytes;
+ return sizeof(p_drv_buf->tx_bytes);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+ u8 tlv_group, u8 *p_mfw_buf, u32 size)
+{
+ union qed_mfw_tlv_data *p_tlv_data;
+ struct qed_tlv_parsed_buf buffer;
+ struct qed_drv_tlv_hdr tlv;
+ int len = 0;
+ u32 offset;
+ u8 *p_tlv;
+
+ p_tlv_data = vzalloc(sizeof(*p_tlv_data));
+ if (!p_tlv_data)
+ return -ENOMEM;
+
+ if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) {
+ vfree(p_tlv_data);
+ return -EINVAL;
+ }
+
+ memset(&tlv, 0, sizeof(tlv));
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_tlv = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_tlv);
+ tlv.tlv_length = TLV_LENGTH(p_tlv);
+ tlv.tlv_flags = TLV_FLAGS(p_tlv);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
+ tlv.tlv_length, tlv.tlv_flags);
+
+ if (tlv_group == QED_MFW_TLV_GENERIC)
+ len = qed_mfw_get_gen_tlv_value(&tlv,
+ &p_tlv_data->generic,
+ &buffer);
+ else if (tlv_group == QED_MFW_TLV_ETH)
+ len = qed_mfw_get_eth_tlv_value(&tlv,
+ &p_tlv_data->eth,
+ &buffer);
+ else if (tlv_group == QED_MFW_TLV_FCOE)
+ len = qed_mfw_get_fcoe_tlv_value(&tlv,
+ &p_tlv_data->fcoe,
+ &buffer);
+ else
+ len = qed_mfw_get_iscsi_tlv_value(&tlv,
+ &p_tlv_data->iscsi,
+ &buffer);
+
+ if (len > 0) {
+ WARN(len > 4 * tlv.tlv_length,
+ "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
+ len, 4 * tlv.tlv_length);
+ len = min_t(int, len, 4 * tlv.tlv_length);
+ tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED;
+ TLV_FLAGS(p_tlv) = tlv.tlv_flags;
+ memcpy(p_mfw_buf + offset + sizeof(tlv),
+ buffer.p_val, len);
+ }
+ }
+
+ vfree(p_tlv_data);
+
+ return 0;
+}
+
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 addr, size, offset, resp, param, val, global_offsize, global_addr;
+ u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp;
+ struct qed_drv_tlv_hdr tlv;
+ int rc;
+
+ addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_GLOBAL);
+ global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ global_addr = SECTION_ADDR(global_offsize, 0);
+ addr = global_addr + offsetof(struct public_global, data_ptr);
+ addr = qed_rd(p_hwfn, p_ptt, addr);
+ size = qed_rd(p_hwfn, p_ptt, global_addr +
+ offsetof(struct public_global, data_size));
+
+ if (!size) {
+ DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size);
+ goto drv_done;
+ }
+
+ p_mfw_buf = vzalloc(size);
+ if (!p_mfw_buf) {
+ DP_NOTICE(p_hwfn, "Failed to allocate memory for p_mfw_buf\n");
+ goto drv_done;
+ }
+
+ /* Read the TLV request into a local buffer. The MFW represents the TLV
+ * in little endian format while the MCP read returns it in big endian
+ * format, so the driver needs to convert the data to little endian first
+ * and then memcpy it, to preserve the MFW TLV format in the driver
+ * buffer.
+ */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ val = qed_rd(p_hwfn, p_ptt, addr + offset);
+ val = be32_to_cpu((__force __be32)val);
+ memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
+ }
+
+ /* Parse the headers to enumerate the requested TLV groups */
+ for (offset = 0; offset < size;
+ offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+ p_temp = &p_mfw_buf[offset];
+ tlv.tlv_type = TLV_TYPE(p_temp);
+ tlv.tlv_length = TLV_LENGTH(p_temp);
+ if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+ DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+ "Unrecognized TLV %d\n", tlv.tlv_type);
+ }
+
+ /* Sanitize the TLV groups according to personality */
+ if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping L2 TLVs for non-L2 function\n");
+ tlv_group &= ~QED_MFW_TLV_ETH;
+ }
+
+ if ((tlv_group & QED_MFW_TLV_FCOE) &&
+ p_hwfn->hw_info.personality != QED_PCI_FCOE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping FCoE TLVs for non-FCoE function\n");
+ tlv_group &= ~QED_MFW_TLV_FCOE;
+ }
+
+ if ((tlv_group & QED_MFW_TLV_ISCSI) &&
+ p_hwfn->hw_info.personality != QED_PCI_ISCSI &&
+ p_hwfn->hw_info.personality != QED_PCI_NVMETCP) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Skipping iSCSI TLVs for non-iSCSI function\n");
+ tlv_group &= ~QED_MFW_TLV_ISCSI;
+ }
+
+ /* Update the TLV values in the local buffer */
+ for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
+ if (tlv_group & id)
+ if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
+ goto drv_done;
+ }
+
+ /* Write the TLV data back to shared memory. Each 4-byte chunk is first
+ * mem-copied into a u32 element, keeping it in little endian format, and
+ * is then converted to big endian as required by the MCP write.
+ */
+ for (offset = 0; offset < size; offset += sizeof(u32)) {
+ memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
+ val = (__force u32)cpu_to_be32(val);
+ qed_wr(p_hwfn, p_ptt, addr + offset, val);
+ }
+
+drv_done:
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+ &param);
+
+ vfree(p_mfw_buf);
+
+ return rc;
+}
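Both copy loops in qed_mfw_process_tlv_req() hinge on the same symmetry: words read from the MCP arrive big endian and are converted to CPU order before being byte-copied into the local TLV buffer, and the write path reverses the two steps. A small user-space sketch of that round trip, using ntohl()/htonl() as stand-ins for be32_to_cpu()/cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand-ins for be32_to_cpu()/cpu_to_be32() */

int main(void)
{
	uint8_t local_buf[4];
	uint32_t mcp_word = 0x11223344;	/* hypothetical word as read from the MCP */
	uint32_t val;

	/* Read path: convert to CPU order, then byte-copy into the TLV buffer. */
	val = ntohl(mcp_word);
	memcpy(local_buf, &val, sizeof(val));

	/* Write path: byte-copy out of the TLV buffer, then convert back. */
	memcpy(&val, local_buf, sizeof(val));
	val = htonl(val);

	printf("round trip %s\n", val == mcp_word ? "ok" : "mismatch");
	return 0;
}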
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
new file mode 100644
index 000000000..f19128c8d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c
@@ -0,0 +1,829 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_nvmetcp.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_reg_addr.h"
+#include "qed_nvmetcp_fw_funcs.h"
+
+static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ u16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ if (p_hwfn->p_nvmetcp_info->event_cb) {
+ struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info;
+
+ return p_nvmetcp->event_cb(p_nvmetcp->event_context,
+ fw_event_code, data);
+ } else {
+ DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n");
+
+ return -EINVAL;
+ }
+}
+
+static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct nvmetcp_init_ramrod_params *p_ramrod = NULL;
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct scsi_init_func_queues *p_queue = NULL;
+ struct nvmetcp_spe_func_init *p_init = NULL;
+ struct qed_sp_init_data init_data = {};
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+ u16 val;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_INIT_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_init;
+ p_init = &p_ramrod->nvmetcp_init_spe;
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_queue = &p_init->q_params;
+ p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
+ p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
+ p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
+ p_params->ll2_ooo_queue_id;
+ SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1);
+ p_init->func_params.log_page_size = ilog2(PAGE_SIZE);
+ p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks);
+ p_init->debug_flags = p_params->debug_mode;
+ DMA_REGPAIR_LE(p_queue->glbl_q_params_addr,
+ p_params->glbl_q_params_addr);
+ p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE);
+ p_queue->num_queues = p_params->num_queues;
+ val = RESC_START(p_hwfn, QED_CMDQS_CQS);
+ p_queue->queue_relative_offset = cpu_to_le16((u16)val);
+ p_queue->cq_sb_pi = p_params->gl_rq_pi;
+
+ for (i = 0; i < p_params->num_queues; i++) {
+ val = qed_get_igu_sb_id(p_hwfn, i);
+ p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val);
+ }
+
+ SET_FIELD(p_queue->q_validity,
+ SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0);
+ p_queue->cmdq_num_entries = 0;
+ p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ);
+ p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER);
+ p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER);
+ p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT);
+ p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT;
+ SET_FIELD(p_ramrod->nvmetcp_init_spe.params,
+ NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT);
+ p_hwfn->p_nvmetcp_info->event_context = event_context;
+ p_hwfn->p_nvmetcp_info->event_cb = async_event_cb;
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP,
+ qed_nvmetcp_async_event);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP);
+
+ return rc;
+}
+
+static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev,
+ struct qed_dev_nvmetcp_info *info)
+{
+ struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev);
+ int rc;
+
+ memset(info, 0, sizeof(*info));
+ rc = qed_fill_dev_info(cdev, &info->common);
+ info->port_id = MFW_PORT(hwfn);
+ info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ);
+
+ return rc;
+}
+
+static void qed_register_nvmetcp_ops(struct qed_dev *cdev,
+ struct qed_nvmetcp_cb_ops *ops,
+ void *cookie)
+{
+ cdev->protocol_ops.nvmetcp = ops;
+ cdev->ops_cookie = cookie;
+}
+
+static int qed_nvmetcp_stop(struct qed_dev *cdev)
+{
+ int rc;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) {
+ DP_NOTICE(cdev, "nvmetcp already stopped\n");
+
+ return 0;
+ }
+
+ if (!hash_empty(cdev->connections)) {
+ DP_NOTICE(cdev,
+ "Can't stop nvmetcp - not all connections were returned\n");
+
+ return -EINVAL;
+ }
+
+ /* Stop the nvmetcp */
+ rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK,
+ NULL);
+ cdev->flags &= ~QED_FLAG_STORAGE_STARTED;
+
+ return rc;
+}
+
+static int qed_nvmetcp_start(struct qed_dev *cdev,
+ struct qed_nvmetcp_tid *tasks,
+ void *event_context,
+ nvmetcp_event_cb_t async_event_cb)
+{
+ struct qed_tid_mem *tid_info;
+ int rc;
+
+ if (cdev->flags & QED_FLAG_STORAGE_STARTED) {
+ DP_NOTICE(cdev, "nvmetcp already started\n");
+
+ return 0;
+ }
+
+ rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev),
+ QED_SPQ_MODE_EBLOCK, NULL,
+ event_context, async_event_cb);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to start nvmetcp\n");
+
+ return rc;
+ }
+
+ cdev->flags |= QED_FLAG_STORAGE_STARTED;
+ hash_init(cdev->connections);
+
+ if (!tasks)
+ return 0;
+
+ tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL);
+ if (!tid_info) {
+ qed_nvmetcp_stop(cdev);
+
+ return -ENOMEM;
+ }
+
+ rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to gather task information\n");
+ qed_nvmetcp_stop(cdev);
+ kfree(tid_info);
+
+ return rc;
+ }
+
+ /* Fill task information */
+ tasks->size = tid_info->tid_size;
+ tasks->num_tids_per_block = tid_info->num_tids_per_block;
+ memcpy(tasks->blocks, tid_info->blocks,
+ MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *));
+ kfree(tid_info);
+
+ return 0;
+}
+
+static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev,
+ u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con = NULL;
+
+ if (!(cdev->flags & QED_FLAG_STORAGE_STARTED))
+ return NULL;
+
+ hash_for_each_possible(cdev->connections, hash_con, node, handle) {
+ if (hash_con->con->icid == handle)
+ break;
+ }
+
+ if (!hash_con || hash_con->con->icid != handle)
+ return NULL;
+
+ return hash_con;
+}
+
+static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_offload *p_ramrod = NULL;
+ struct tcp_offload_params_opt2 *p_tcp = NULL;
+ struct qed_sp_init_data init_data = { 0 };
+ struct qed_spq_entry *p_ent = NULL;
+ dma_addr_t r2tq_pbl_addr;
+ dma_addr_t xhq_pbl_addr;
+ dma_addr_t uhq_pbl_addr;
+ u16 physical_q;
+ int rc = 0;
+ u8 i;
+
+ /* Get SPQ entry */
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload;
+
+ /* Transmission PQ is the first of the PF */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
+ p_conn->physical_q0 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q);
+
+ /* nvmetcp Pure-ACK PQ */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK);
+ p_conn->physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q);
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr);
+ r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr);
+ xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr);
+ uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq);
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr);
+ p_ramrod->nvmetcp.flags = p_conn->offl_flags;
+ p_ramrod->nvmetcp.default_cq = p_conn->default_cq;
+ p_ramrod->nvmetcp.initial_ack = 0;
+ DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr,
+ p_conn->nvmetcp_cccid_itid_table_addr);
+ p_ramrod->nvmetcp.nvmetcp.cccid_max_range =
+ cpu_to_le16(p_conn->nvmetcp_cccid_max_range);
+ p_tcp = &p_ramrod->tcp;
+ qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi,
+ &p_tcp->remote_mac_addr_mid,
+ &p_tcp->remote_mac_addr_lo, p_conn->remote_mac);
+ qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi,
+ &p_tcp->local_mac_addr_mid,
+ &p_tcp->local_mac_addr_lo, p_conn->local_mac);
+ p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
+ p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
+ p_tcp->ip_version = p_conn->ip_version;
+ if (p_tcp->ip_version == TCP_IPV6) {
+ for (i = 0; i < 4; i++) {
+ p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]);
+ p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]);
+ }
+ } else {
+ p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]);
+ p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]);
+ }
+
+ p_tcp->flow_label = cpu_to_le32(p_conn->flow_label);
+ p_tcp->ttl = p_conn->ttl;
+ p_tcp->tos_or_tc = p_conn->tos_or_tc;
+ p_tcp->remote_port = cpu_to_le16(p_conn->remote_port);
+ p_tcp->local_port = cpu_to_le16(p_conn->local_port);
+ p_tcp->mss = cpu_to_le16(p_conn->mss);
+ p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale;
+ p_tcp->connect_mode = p_conn->connect_mode;
+ p_tcp->cwnd = cpu_to_le32(p_conn->cwnd);
+ p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt;
+ p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+ p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+ p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u32 dval;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_UPDATE_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_update;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->flags = p_conn->update_flag;
+ p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
+ dval = p_conn->max_recv_pdu_length;
+ p_ramrod->max_recv_pdu_length = cpu_to_le32(dval);
+ dval = p_conn->max_send_pdu_length;
+ p_ramrod->max_send_pdu_length = cpu_to_le32(dval);
+ p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct nvmetcp_spe_conn_termination *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate;
+ p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
+ p_ramrod->abortive = p_conn->abortive_dsconnect;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_addr)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = p_conn->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_addr;
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ NVMETCP_RAMROD_CMD_ID_CLEAR_SQ,
+ PROTOCOLID_TCP_ULP, &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
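+/* Doorbell address of the given CID within the PF doorbell BAR (legacy DEMS
+ * doorbell offset, as computed by qed_db_addr()).
+ */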
+static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ return (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+}
+
+static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ };
+ struct qed_nvmetcp_pf_params *p_params = NULL;
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+
+ /* Try finding a free connection that can be used */
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list))
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ *p_out_conn = p_conn;
+
+ return 0;
+ }
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ /* Need to allocate a new connection */
+ p_params = &p_hwfn->pf_params.nvmetcp_pf_params;
+ p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL);
+ if (!p_conn)
+ return -ENOMEM;
+
+ params.num_elems = p_params->num_r2tq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe);
+ params.elem_size = sizeof(struct nvmetcp_wqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, &params);
+ if (rc)
+ goto nomem_r2tq;
+
+ params.num_elems = p_params->num_uhq_pages_in_ring *
+ QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe);
+ params.elem_size = sizeof(struct iscsi_uhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, &params);
+ if (rc)
+ goto nomem_uhq;
+
+ params.elem_size = sizeof(struct iscsi_xhqe);
+ rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, &params);
+ if (rc)
+ goto nomem;
+
+ p_conn->free_on_delete = true;
+ *p_out_conn = p_conn;
+
+ return 0;
+
+nomem:
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+nomem_uhq:
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+nomem_r2tq:
+ kfree(p_conn);
+
+ return -ENOMEM;
+}
+
+static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn **p_out_conn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+ int rc = 0;
+ u32 icid;
+
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ if (rc)
+ return rc;
+
+ rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn);
+ if (rc) {
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ qed_cxt_release_cid(p_hwfn, icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+
+ return rc;
+ }
+
+ p_conn->icid = icid;
+ p_conn->conn_id = (u16)icid;
+ p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid;
+ *p_out_conn = p_conn;
+
+ return rc;
+}
+
+static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock);
+ list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list);
+ qed_cxt_release_cid(p_hwfn, p_conn->icid);
+ spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_nvmetcp_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ kfree(p_conn);
+}
+
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_info *p_nvmetcp_info;
+
+ p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL);
+ if (!p_nvmetcp_info)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p_nvmetcp_info->free_list);
+ p_hwfn->p_nvmetcp_info = p_nvmetcp_info;
+
+ return 0;
+}
+
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn)
+{
+ spin_lock_init(&p_hwfn->p_nvmetcp_info->lock);
+}
+
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_nvmetcp_conn *p_conn = NULL;
+
+ if (!p_hwfn->p_nvmetcp_info)
+ return;
+
+ while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list,
+ struct qed_nvmetcp_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_nvmetcp_free_connection(p_hwfn, p_conn);
+ }
+ }
+
+ kfree(p_hwfn->p_nvmetcp_info);
+ p_hwfn->p_nvmetcp_info = NULL;
+}
+
+static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev,
+ u32 *handle,
+ u32 *fw_cid, void __iomem **p_doorbell)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ int rc;
+
+ /* Allocate a hashed connection */
+ hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC);
+ if (!hash_con)
+ return -ENOMEM;
+
+ /* Acquire the connection */
+ rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev),
+ &hash_con->con);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to acquire Connection\n");
+ kfree(hash_con);
+
+ return rc;
+ }
+
+ /* Add the connection to the hash table */
+ *handle = hash_con->con->icid;
+ *fw_cid = hash_con->con->fw_cid;
+ hash_add(cdev->connections, &hash_con->node, *handle);
+ if (p_doorbell)
+ *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev),
+ *handle);
+
+ return 0;
+}
+
+static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hlist_del(&hash_con->node);
+ qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con);
+ kfree(hash_con);
+
+ return 0;
+}
+
+static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle,
+ struct qed_nvmetcp_params_offload *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+
+ /* FW initializations */
+ con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE;
+ con->sq_pbl_addr = conn_info->sq_pbl_addr;
+ con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range;
+ con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr;
+ con->default_cq = conn_info->default_cq;
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1);
+ SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1);
+
+ /* Networking and TCP stack initializations */
+ ether_addr_copy(con->local_mac, conn_info->src.mac);
+ ether_addr_copy(con->remote_mac, conn_info->dst.mac);
+ memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip));
+ memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip));
+ con->local_port = conn_info->src.port;
+ con->remote_port = conn_info->dst.port;
+ con->vlan_id = conn_info->vlan_id;
+
+ if (conn_info->timestamp_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1);
+
+ if (conn_info->delayed_ack_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1);
+
+ if (conn_info->tcp_keep_alive_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1);
+
+ if (conn_info->ecn_en)
+ SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1);
+
+ con->ip_version = conn_info->ip_version;
+ con->flow_label = QED_TCP_FLOW_LABEL;
+ con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt;
+ con->ka_timeout = conn_info->ka_timeout;
+ con->ka_interval = conn_info->ka_interval;
+ con->max_rt_time = conn_info->max_rt_time;
+ con->ttl = conn_info->ttl;
+ con->tos_or_tc = conn_info->tos_or_tc;
+ con->mss = conn_info->mss;
+ con->cwnd = conn_info->cwnd;
+ con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
+ con->connect_mode = 0;
+
+ return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_update_conn(struct qed_dev *cdev,
+ u32 handle,
+ struct qed_nvmetcp_params_update *conn_info)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+ struct qed_nvmetcp_conn *con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ /* Update the connection with information from the params */
+ con = hash_con->con;
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0);
+ SET_FIELD(con->update_flag,
+ ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1);
+ if (conn_info->hdr_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1);
+
+ if (conn_info->data_digest_en)
+ SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1);
+
+ /* Placeholder - initialize pfv, cpda, hpda */
+
+ con->max_seq_size = conn_info->max_io_size;
+ con->max_recv_pdu_length = conn_info->max_recv_pdu_length;
+ con->max_send_pdu_length = conn_info->max_send_pdu_length;
+ con->first_seq_length = conn_info->max_io_size;
+
+ return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev,
+ u32 handle, u8 abrt_conn)
+{
+ struct qed_hash_nvmetcp_con *hash_con;
+
+ hash_con = qed_nvmetcp_get_hash(cdev, handle);
+ if (!hash_con) {
+ DP_NOTICE(cdev, "Failed to find connection for handle %d\n",
+ handle);
+
+ return -EINVAL;
+ }
+
+ hash_con->con->abortive_dsconnect = abrt_conn;
+
+ return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+
+static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .ll2 = &qed_ll2_ops_pass,
+ .fill_dev_info = &qed_fill_nvmetcp_dev_info,
+ .register_ops = &qed_register_nvmetcp_ops,
+ .start = &qed_nvmetcp_start,
+ .stop = &qed_nvmetcp_stop,
+ .acquire_conn = &qed_nvmetcp_acquire_conn,
+ .release_conn = &qed_nvmetcp_release_conn,
+ .offload_conn = &qed_nvmetcp_offload_conn,
+ .update_conn = &qed_nvmetcp_update_conn,
+ .destroy_conn = &qed_nvmetcp_destroy_conn,
+ .clear_sq = &qed_nvmetcp_clear_conn_sq,
+ .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter,
+ .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter,
+ .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter,
+ .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter,
+ .clear_all_filters = &qed_llh_clear_all_filters,
+ .init_read_io = &init_nvmetcp_host_read_task,
+ .init_write_io = &init_nvmetcp_host_write_task,
+ .init_icreq_exchange = &init_nvmetcp_init_conn_req_task,
+ .init_task_cleanup = &init_cleanup_task_nvmetcp
+};
+
+const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void)
+{
+ return &qed_nvmetcp_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_nvmetcp_ops);
+
+void qed_put_nvmetcp_ops(void)
+{
+}
+EXPORT_SYMBOL(qed_put_nvmetcp_ops);
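+
+/* Illustrative sketch (not part of this patch): a hypothetical NVMe/TCP
+ * offload consumer would bind to the ops table exported above roughly as
+ * follows. The qedn_example_* name and the surrounding probe wiring are
+ * assumptions, and error handling is trimmed; kept under #if 0 since it is
+ * an example only.
+ */
+#if 0
+#include <linux/qed/qed_nvmetcp_if.h>
+
+static int qedn_example_bind(struct qed_dev *cdev)
+{
+ const struct qed_nvmetcp_ops *ops = qed_get_nvmetcp_ops();
+ struct qed_dev_nvmetcp_info dev_info;
+ void __iomem *db_addr;
+ u32 handle, fw_cid;
+ int rc;
+
+ rc = ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ return rc;
+
+ /* Minimal start: no TID table and no async-event callback */
+ rc = ops->start(cdev, NULL, NULL, NULL);
+ if (rc)
+ return rc;
+
+ /* Acquire one connection; offload_conn()/update_conn() would follow */
+ rc = ops->acquire_conn(cdev, &handle, &fw_cid, &db_addr);
+ if (rc)
+ ops->stop(cdev);
+
+ return rc;
+}
+#endif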
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
new file mode 100644
index 000000000..e5e9d075b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_H
+#define _QED_NVMETCP_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/tcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+#define QED_NVMETCP_FW_CQ_SIZE (4 * 1024)
+
+/* TCP parameters */
+#define QED_TCP_FLOW_LABEL 0
+#define QED_TCP_TWO_MSL_TIMER 4000
+#define QED_TCP_HALF_WAY_CLOSE_TIMEOUT 10
+#define QED_TCP_MAX_FIN_RT 2
+#define QED_TCP_SWS_TIMER 5000
+
+struct qed_nvmetcp_info {
+ spinlock_t lock; /* Connection resources. */
+ struct list_head free_list;
+ u16 max_num_outstanding_tasks;
+ void *event_context;
+ nvmetcp_event_cb_t event_cb;
+};
+
+struct qed_hash_nvmetcp_con {
+ struct hlist_node node;
+ struct qed_nvmetcp_conn *con;
+};
+
+struct qed_nvmetcp_conn {
+ struct list_head list_entry;
+ bool free_on_delete;
+ u16 conn_id;
+ u32 icid;
+ u32 fw_cid;
+ u8 layer_code;
+ u8 offl_flags;
+ u8 connect_mode;
+ dma_addr_t sq_pbl_addr;
+ struct qed_chain r2tq;
+ struct qed_chain xhq;
+ struct qed_chain uhq;
+ u8 local_mac[6];
+ u8 remote_mac[6];
+ u8 ip_version;
+ u8 ka_max_probe_cnt;
+ u16 vlan_id;
+ u16 tcp_flags;
+ u32 remote_ip[4];
+ u32 local_ip[4];
+ u32 flow_label;
+ u32 ka_timeout;
+ u32 ka_interval;
+ u32 max_rt_time;
+ u8 ttl;
+ u8 tos_or_tc;
+ u16 remote_port;
+ u16 local_port;
+ u16 mss;
+ u8 rcv_wnd_scale;
+ u32 rcv_wnd;
+ u32 cwnd;
+ u8 update_flag;
+ u8 default_cq;
+ u8 abortive_dsconnect;
+ u32 max_seq_size;
+ u32 max_recv_pdu_length;
+ u32 max_send_pdu_length;
+ u32 first_seq_length;
+ u16 physical_q0;
+ u16 physical_q1;
+ u16 nvmetcp_cccid_max_range;
+ dma_addr_t nvmetcp_cccid_itid_table_addr;
+};
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn);
+void qed_nvmetcp_free(struct qed_hwfn *p_hwfn);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+static inline int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) {}
+static inline void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) {}
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
new file mode 100644
index 000000000..3b84d00cf
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+#include "qed_nvmetcp_fw_funcs.h"
+
+#define NVMETCP_NUM_SGES_IN_CACHE 0x4
+
+bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge)
+{
+ return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge);
+}
+
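+/* Fill the task-context SGL parameters and cache up to
+ * NVMETCP_NUM_SGES_IN_CACHE leading SGEs in the context data descriptor.
+ */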
+void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params,
+ struct scsi_cached_sges *ctx_data_desc,
+ struct storage_sgl_task_params *sgl_params)
+{
+ u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ?
+ NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges);
+ u8 sge_index;
+
+ /* sgl params */
+ ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo);
+ ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi);
+ ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size);
+ ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges);
+
+ for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) {
+ ctx_data_desc->sge[sge_index].sge_addr.lo =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo);
+ ctx_data_desc->sge[sge_index].sge_addr.hi =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi);
+ ctx_data_desc->sge[sge_index].sge_len =
+ cpu_to_le32(sgl_params->sgl[sge_index].sge_len);
+ }
+}
+
+static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ u32 io_size;
+
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE)
+ io_size = task_params->tx_io_size;
+ else
+ io_size = task_params->rx_io_size;
+
+ if (unlikely(!io_size))
+ return 0;
+
+ return io_size;
+}
+
+static inline void init_sqe(struct nvmetcp_task_params *task_params,
+ struct storage_sgl_task_params *sgl_task_params,
+ enum nvmetcp_task_type task_type)
+{
+ if (!task_params->sqe)
+ return;
+
+ memset(task_params->sqe, 0, sizeof(*task_params->sqe));
+ task_params->sqe->task_id = cpu_to_le16(task_params->itid);
+
+ switch (task_type) {
+ case NVMETCP_TASK_TYPE_HOST_WRITE: {
+ u32 buf_size = 0;
+ u32 num_sges = 0;
+
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ if (task_params->tx_io_size) {
+ if (task_params->send_write_incapsule)
+ buf_size = calc_rw_task_size(task_params, task_type);
+
+ if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge))
+ num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO;
+ else
+ num_sges = min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR);
+ }
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges);
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size);
+ } break;
+
+ case NVMETCP_TASK_TYPE_HOST_READ: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_NORMAL);
+ SET_FIELD(task_params->sqe->contlen_cdbsize,
+ NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1);
+ } break;
+
+ case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: {
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_MIDDLE_PATH);
+
+ if (task_params->tx_io_size) {
+ SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN,
+ task_params->tx_io_size);
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES,
+ min((u16)sgl_task_params->num_sges,
+ (u16)SCSI_NUM_SGES_SLOW_SGL_THR));
+ }
+ } break;
+
+ case NVMETCP_TASK_TYPE_CLEANUP:
+ SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE,
+ NVMETCP_WQE_TYPE_TASK_CLEANUP);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* The following function initializes the NVMeTCP task params */
+static inline void
+init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context,
+ struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type)
+{
+ context->ystorm_st_context.state.cccid = task_params->host_cccid;
+ SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1);
+ context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo);
+ context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi);
+}
+
+/* The following function initializes default values for all tasks */
+static inline void
+init_default_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ void *pdu_header, void *nvme_cmd,
+ enum nvmetcp_task_type task_type)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ const u8 val_byte = context->mstorm_ag_context.cdu_validation;
+ u8 dw_index;
+
+ memset(context, 0, sizeof(*context));
+ init_nvmetcp_task_params(context, task_params,
+ (enum nvmetcp_task_type)task_type);
+
+ /* The swapping below is a FW requirement; it will be removed in future FW versions */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE ||
+ task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+
+ for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32);
+ dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2]));
+ } else {
+ for (dw_index = 0;
+ dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32);
+ dw_index++)
+ context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] =
+ cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index]));
+ }
+
+ /* M-Storm Context: */
+ context->mstorm_ag_context.cdu_validation = val_byte;
+ context->mstorm_st_context.task_type = (u8)(task_type);
+ context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid);
+
+ /* Ustorm Context: */
+ SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1);
+ context->ustorm_st_context.task_type = (u8)(task_type);
+ context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
+ context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid);
+}
+
+/* The following function initializes the U-Storm Task Contexts */
+static inline void
+init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context,
+ struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context,
+ u32 remaining_recv_len,
+ u32 expected_data_transfer_len, u8 num_sges,
+ bool tx_dif_conn_err_en)
+{
+ /* Remaining data to be received in bytes. Used in validations */
+ ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len);
+ ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len);
+ ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len);
+ SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges);
+ SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ tx_dif_conn_err_en ? 1 : 0);
+}
+
+/* The following function initializes Local Completion Contexts: */
+static inline void
+set_local_completion_context(struct e5_nvmetcp_task_context *context)
+{
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1);
+ SET_FIELD(context->ustorm_st_context.flags,
+ USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1);
+}
+
+/* Common Fastpath task init function: */
+static inline void
+init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params,
+ enum nvmetcp_task_type task_type,
+ void *pdu_header, void *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+ u32 task_size = calc_rw_task_size(task_params, task_type);
+ bool slow_io = false;
+ u8 num_sges = 0;
+
+ init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type);
+
+ /* Tx/Rx: */
+ if (task_params->tx_io_size) {
+ /* if data to transmit: */
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ sgl_task_params);
+ slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge);
+ num_sges =
+ (u8)(!slow_io ? min((u32)sgl_task_params->num_sges,
+ (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ NVMETCP_WQE_NUM_SGES_SLOWIO);
+ if (slow_io) {
+ SET_FIELD(context->ystorm_st_context.state.flags,
+ YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1);
+ }
+ } else if (task_params->rx_io_size) {
+ /* if data to receive: */
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ sgl_task_params);
+ num_sges =
+ (u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges,
+ sgl_task_params->small_mid_sge) ?
+ min((u32)sgl_task_params->num_sges,
+ (u32)SCSI_NUM_SGES_SLOW_SGL_THR) :
+ NVMETCP_WQE_NUM_SGES_SLOWIO);
+ context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size);
+ }
+
+ /* Ustorm context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+ /* Remaining Receive length is the Task Size */
+ task_size,
+ /* The size of the transmitted task */
+ task_size,
+ /* num_sges */
+ num_sges,
+ false);
+
+ /* Set exp_data_acked */
+ if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) {
+ if (task_params->send_write_incapsule)
+ context->ustorm_ag_context.exp_data_acked = task_size;
+ else
+ context->ustorm_ag_context.exp_data_acked = 0;
+ } else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) {
+ context->ustorm_ag_context.exp_data_acked = 0;
+ }
+
+ context->ustorm_ag_context.exp_cont_len = 0;
+ init_sqe(task_params, sgl_task_params, task_type);
+}
+
+static void
+init_common_initiator_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_read_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_initiator_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_WRITE,
+ cmd_pdu_header, nvme_cmd, sgl_task_params);
+}
+
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params)
+{
+ init_common_initiator_write_task(task_params, (void *)cmd_pdu_header,
+ (void *)nvme_cmd, sgl_task_params);
+}
+
+static void
+init_common_login_request_task(struct nvmetcp_task_params *task_params,
+ void *login_req_pdu_header,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ struct e5_nvmetcp_task_context *context = task_params->context;
+
+ init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL,
+ NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+
+ /* Ustorm Context: */
+ init_ustorm_task_contexts(&context->ustorm_st_context,
+ &context->ustorm_ag_context,
+
+ /* Remaining Receive length is the Task Size */
+ task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0,
+
+ /* The size of the transmitted task */
+ task_params->tx_io_size ?
+ tx_sgl_task_params->total_buffer_size : 0,
+ 0, /* num_sges */
+ 0); /* tx_dif_conn_err_en */
+
+ /* SGL context: */
+ if (task_params->tx_io_size)
+ init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params,
+ &context->ystorm_st_context.state.data_desc,
+ tx_sgl_task_params);
+ if (task_params->rx_io_size)
+ init_scsi_sgl_context(&context->mstorm_st_context.sgl_params,
+ &context->mstorm_st_context.data_desc,
+ rx_sgl_task_params);
+
+ context->mstorm_st_context.rem_task_size =
+ cpu_to_le32(task_params->rx_io_size ?
+ rx_sgl_task_params->total_buffer_size : 0);
+ init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST);
+}
+
+/* The following function initializes Login task in Host mode: */
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params)
+{
+ init_common_login_request_task(task_params, init_conn_req_pdu_hdr,
+ tx_sgl_task_params, rx_sgl_task_params);
+}
+
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params)
+{
+ init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP);
+}
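+
+/* Illustrative sketch (not part of this patch): rough shape of a host-read
+ * task initialization as a hypothetical upper driver might issue it. How the
+ * per-task context and SQ element are obtained (from the TID blocks reported
+ * via the ops start() call and from the connection SQ) is assumed here, as
+ * is the example_* naming; kept under #if 0 since it is an example only.
+ */
+#if 0
+static void example_post_read(struct e5_nvmetcp_task_context *ctx,
+ struct nvmetcp_wqe *sqe, u16 itid, u16 conn_icid,
+ struct nvme_tcp_cmd_pdu *pdu, struct nvme_command *cmd,
+ struct storage_sgl_task_params *sgl)
+{
+ struct nvmetcp_task_params params = {};
+
+ params.context = ctx; /* per-task HW context */
+ params.sqe = sqe; /* next free SQ element of the connection */
+ params.itid = itid;
+ params.conn_icid = conn_icid;
+ params.cq_rss_number = 0; /* completion queue to use */
+ params.rx_io_size = sgl->total_buffer_size;
+
+ init_nvmetcp_host_read_task(&params, pdu, cmd, sgl);
+ /* The caller would then ring the connection doorbell to hand the SQE
+ * to the FW.
+ */
+}
+#endif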
diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
new file mode 100644
index 000000000..1d5ddc217
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright 2021 Marvell. All rights reserved. */
+
+#ifndef _QED_NVMETCP_FW_FUNCS_H
+#define _QED_NVMETCP_FW_FUNCS_H
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/nvmetcp_common.h>
+#include <linux/qed/qed_nvmetcp_if.h>
+
+#if IS_ENABLED(CONFIG_QED_NVMETCP)
+
+void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_cmd_pdu *cmd_pdu_header,
+ struct nvme_command *nvme_cmd,
+ struct storage_sgl_task_params *sgl_task_params);
+void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params,
+ struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr,
+ struct storage_sgl_task_params *tx_sgl_task_params,
+ struct storage_sgl_task_params *rx_sgl_task_params);
+void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params);
+
+#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */
+
+#endif /* _QED_NVMETCP_FW_FUNCS_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
new file mode 100644
index 000000000..5d725f59d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_iscsi.h"
+#include "qed_ll2.h"
+#include "qed_ooo.h"
+#include "qed_cxt.h"
+#include "qed_nvmetcp.h"
+static struct qed_ooo_archipelago
+*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info
+ *p_ooo_info,
+ u32 cid)
+{
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+ struct qed_ooo_archipelago *p_archipelago;
+
+ if (unlikely(idx >= p_ooo_info->max_num_archipelagos))
+ return NULL;
+
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+
+ if (unlikely(list_empty(&p_archipelago->isles_list)))
+ return NULL;
+
+ return p_archipelago;
+}
+
+static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 isle)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+ struct qed_ooo_isle *p_isle = NULL;
+ u8 the_num_of_isle = 1;
+
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (unlikely(!p_archipelago)) {
+ DP_NOTICE(p_hwfn,
+ "Connection %d is not found in OOO list\n", cid);
+ return NULL;
+ }
+
+ list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) {
+ if (the_num_of_isle == isle)
+ return p_isle;
+ the_num_of_isle++;
+ }
+
+ return NULL;
+}
+
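+/* Record the CQE in the circular OOO history buffer; head_idx wraps around
+ * at num_of_cqes.
+ */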
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct ooo_opaque *p_cqe)
+{
+ struct qed_ooo_history *p_history = &p_ooo_info->ooo_history;
+
+ if (p_history->head_idx == p_history->num_of_cqes)
+ p_history->head_idx = 0;
+ p_history->p_cqes[p_history->head_idx] = *p_cqe;
+ p_history->head_idx++;
+}
+
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+ u16 max_num_archipelagos = 0, cid_base;
+ struct qed_ooo_info *p_ooo_info;
+ enum protocol_type proto;
+ u16 max_num_isles = 0;
+ u32 i;
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
+ proto = PROTOCOLID_TCP_ULP;
+ break;
+ case QED_PCI_ETH_RDMA:
+ case QED_PCI_ETH_IWARP:
+ proto = PROTOCOLID_IWARP;
+ break;
+ default:
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate qed_ooo_info: unknown personality\n");
+ return -EINVAL;
+ }
+
+ max_num_archipelagos = (u16)qed_cxt_get_proto_cid_count(p_hwfn, proto,
+ NULL);
+ max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos;
+ cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, proto);
+
+ if (!max_num_archipelagos) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate qed_ooo_info: unknown amount of connections\n");
+ return -EINVAL;
+ }
+
+ p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL);
+ if (!p_ooo_info)
+ return -ENOMEM;
+
+ p_ooo_info->cid_base = cid_base;
+ p_ooo_info->max_num_archipelagos = max_num_archipelagos;
+
+ INIT_LIST_HEAD(&p_ooo_info->free_buffers_list);
+ INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list);
+ INIT_LIST_HEAD(&p_ooo_info->free_isles_list);
+
+ p_ooo_info->p_isles_mem = kcalloc(max_num_isles,
+ sizeof(struct qed_ooo_isle),
+ GFP_KERNEL);
+ if (!p_ooo_info->p_isles_mem)
+ goto no_isles_mem;
+
+ for (i = 0; i < max_num_isles; i++) {
+ INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list);
+ list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry,
+ &p_ooo_info->free_isles_list);
+ }
+
+ p_ooo_info->p_archipelagos_mem =
+ kcalloc(max_num_archipelagos,
+ sizeof(struct qed_ooo_archipelago),
+ GFP_KERNEL);
+ if (!p_ooo_info->p_archipelagos_mem)
+ goto no_archipelagos_mem;
+
+ for (i = 0; i < max_num_archipelagos; i++)
+ INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list);
+
+ p_ooo_info->ooo_history.p_cqes =
+ kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES,
+ sizeof(struct ooo_opaque),
+ GFP_KERNEL);
+ if (!p_ooo_info->ooo_history.p_cqes)
+ goto no_history_mem;
+
+ p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
+ p_hwfn->p_ooo_info = p_ooo_info;
+ return 0;
+
+no_history_mem:
+ kfree(p_ooo_info->p_archipelagos_mem);
+no_archipelagos_mem:
+ kfree(p_ooo_info->p_isles_mem);
+no_isles_mem:
+ kfree(p_ooo_info);
+ return -ENOMEM;
+}
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid)
+{
+ struct qed_ooo_archipelago *p_archipelago;
+ struct qed_ooo_buffer *p_buffer;
+ struct qed_ooo_isle *p_isle;
+
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (!p_archipelago)
+ return;
+
+ while (!list_empty(&p_archipelago->isles_list)) {
+ p_isle = list_first_entry(&p_archipelago->isles_list,
+ struct qed_ooo_isle, list_entry);
+
+ list_del(&p_isle->list_entry);
+
+ while (!list_empty(&p_isle->buffers_list)) {
+ p_buffer = list_first_entry(&p_isle->buffers_list,
+ struct qed_ooo_buffer,
+ list_entry);
+
+ if (!p_buffer)
+ break;
+
+ list_move_tail(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
+ }
+ list_add_tail(&p_isle->list_entry,
+ &p_ooo_info->free_isles_list);
+ }
+}
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_archipelago *p_archipelago;
+ struct qed_ooo_buffer *p_buffer;
+ struct qed_ooo_isle *p_isle;
+ u32 i;
+
+ for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) {
+ p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]);
+
+ while (!list_empty(&p_archipelago->isles_list)) {
+ p_isle = list_first_entry(&p_archipelago->isles_list,
+ struct qed_ooo_isle,
+ list_entry);
+
+ list_del(&p_isle->list_entry);
+
+ while (!list_empty(&p_isle->buffers_list)) {
+ p_buffer =
+ list_first_entry(&p_isle->buffers_list,
+ struct qed_ooo_buffer,
+ list_entry);
+
+ if (!p_buffer)
+ break;
+
+ list_move_tail(&p_buffer->list_entry,
+ &p_ooo_info->free_buffers_list);
+ }
+ list_add_tail(&p_isle->list_entry,
+ &p_ooo_info->free_isles_list);
+ }
+ }
+ if (!list_empty(&p_ooo_info->ready_buffers_list))
+ list_splice_tail_init(&p_ooo_info->ready_buffers_list,
+ &p_ooo_info->free_buffers_list);
+}
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn)
+{
+ qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
+ memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0,
+ p_hwfn->p_ooo_info->ooo_history.num_of_cqes *
+ sizeof(struct ooo_opaque));
+ p_hwfn->p_ooo_info->ooo_history.head_idx = 0;
+}
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info;
+ struct qed_ooo_buffer *p_buffer;
+
+ if (!p_ooo_info)
+ return;
+
+ qed_ooo_release_all_isles(p_hwfn, p_ooo_info);
+ while (!list_empty(&p_ooo_info->free_buffers_list)) {
+ p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+ struct qed_ooo_buffer, list_entry);
+
+ if (!p_buffer)
+ break;
+
+ list_del(&p_buffer->list_entry);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_buffer->rx_buffer_size,
+ p_buffer->rx_buffer_virt_addr,
+ p_buffer->rx_buffer_phys_addr);
+ kfree(p_buffer);
+ }
+
+ kfree(p_ooo_info->p_isles_mem);
+ kfree(p_ooo_info->p_archipelagos_mem);
+ kfree(p_ooo_info->ooo_history.p_cqes);
+ kfree(p_ooo_info);
+ p_hwfn->p_ooo_info = NULL;
+}
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer)
+{
+ list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_buffer *p_buffer = NULL;
+
+ if (!list_empty(&p_ooo_info->free_buffers_list)) {
+ p_buffer = list_first_entry(&p_ooo_info->free_buffers_list,
+ struct qed_ooo_buffer, list_entry);
+
+ list_del(&p_buffer->list_entry);
+ }
+
+ return p_buffer;
+}
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer, u8 on_tail)
+{
+ if (on_tail)
+ list_add_tail(&p_buffer->list_entry,
+ &p_ooo_info->ready_buffers_list);
+ else
+ list_add(&p_buffer->list_entry,
+ &p_ooo_info->ready_buffers_list);
+}
+
+struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+{
+ struct qed_ooo_buffer *p_buffer = NULL;
+
+ if (!list_empty(&p_ooo_info->ready_buffers_list)) {
+ p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list,
+ struct qed_ooo_buffer, list_entry);
+
+ list_del(&p_buffer->list_entry);
+ }
+
+ return p_buffer;
+}
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 drop_isle, u8 drop_size)
+{
+ struct qed_ooo_isle *p_isle = NULL;
+ u8 isle_idx;
+
+ for (isle_idx = 0; isle_idx < drop_size; isle_idx++) {
+ p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle);
+ if (!p_isle) {
+ DP_NOTICE(p_hwfn,
+ "Isle %d is not found(cid %d)\n",
+ drop_isle, cid);
+ return;
+ }
+ if (list_empty(&p_isle->buffers_list))
+ DP_NOTICE(p_hwfn,
+ "Isle %d is empty(cid %d)\n", drop_isle, cid);
+ else
+ list_splice_tail_init(&p_isle->buffers_list,
+ &p_ooo_info->free_buffers_list);
+
+ list_del(&p_isle->list_entry);
+ p_ooo_info->cur_isles_number--;
+ list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list);
+ }
+}
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer)
+{
+ struct qed_ooo_archipelago *p_archipelago = NULL;
+ struct qed_ooo_isle *p_prev_isle = NULL;
+ struct qed_ooo_isle *p_isle = NULL;
+
+ if (ooo_isle > 1) {
+ p_prev_isle = qed_ooo_seek_isle(p_hwfn,
+ p_ooo_info, cid, ooo_isle - 1);
+ if (unlikely(!p_prev_isle)) {
+ DP_NOTICE(p_hwfn,
+ "Isle %d is not found(cid %d)\n",
+ ooo_isle - 1, cid);
+ return;
+ }
+ }
+ p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
+ if (unlikely(!p_archipelago && ooo_isle != 1)) {
+ DP_NOTICE(p_hwfn,
+ "Connection %d is not found in OOO list\n", cid);
+ return;
+ }
+
+ if (!list_empty(&p_ooo_info->free_isles_list)) {
+ p_isle = list_first_entry(&p_ooo_info->free_isles_list,
+ struct qed_ooo_isle, list_entry);
+
+ list_del(&p_isle->list_entry);
+ if (unlikely(!list_empty(&p_isle->buffers_list))) {
+ DP_NOTICE(p_hwfn, "Free isle is not empty\n");
+ INIT_LIST_HEAD(&p_isle->buffers_list);
+ }
+ } else {
+ DP_NOTICE(p_hwfn, "No more free isles\n");
+ return;
+ }
+
+ if (!p_archipelago) {
+ u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
+
+ p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
+ }
+
+ list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+ p_ooo_info->cur_isles_number++;
+ p_ooo_info->gen_isles_number++;
+
+ if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number)
+ p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number;
+
+ if (!p_prev_isle)
+ list_add(&p_isle->list_entry, &p_archipelago->isles_list);
+ else
+ list_add(&p_isle->list_entry, &p_prev_isle->list_entry);
+}
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid,
+ u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer, u8 buffer_side)
+{
+ struct qed_ooo_isle *p_isle = NULL;
+
+ p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
+ if (unlikely(!p_isle)) {
+ DP_NOTICE(p_hwfn,
+ "Isle %d is not found(cid %d)\n", ooo_isle, cid);
+ return;
+ }
+
+ if (unlikely(buffer_side == QED_OOO_LEFT_BUF))
+ list_add(&p_buffer->list_entry, &p_isle->buffers_list);
+ else
+ list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
+}
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle)
+{
+ struct qed_ooo_isle *p_right_isle = NULL;
+ struct qed_ooo_isle *p_left_isle = NULL;
+
+ p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+ left_isle + 1);
+ if (unlikely(!p_right_isle)) {
+ DP_NOTICE(p_hwfn,
+ "Right isle %d is not found(cid %d)\n",
+ left_isle + 1, cid);
+ return;
+ }
+
+ list_del(&p_right_isle->list_entry);
+ p_ooo_info->cur_isles_number--;
+ if (left_isle) {
+ p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
+ left_isle);
+ if (unlikely(!p_left_isle)) {
+ DP_NOTICE(p_hwfn,
+ "Left isle %d is not found(cid %d)\n",
+ left_isle, cid);
+ return;
+ }
+ list_splice_tail_init(&p_right_isle->buffers_list,
+ &p_left_isle->buffers_list);
+ } else {
+ list_splice_tail_init(&p_right_isle->buffers_list,
+ &p_ooo_info->ready_buffers_list);
+ }
+ list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
new file mode 100644
index 000000000..3a7e1b59d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_OOO_H
+#define _QED_OOO_H
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "qed.h"
+
+#define QED_MAX_NUM_ISLES 256
+#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512
+
+#define QED_OOO_LEFT_BUF 0
+#define QED_OOO_RIGHT_BUF 1
+
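+/* Terminology used throughout the OOO code:
+ *  - buffer:      a single received out-of-order packet buffer.
+ *  - isle:        a list of buffers that are contiguous with each other but
+ *                 not yet contiguous with the in-order stream.
+ *  - archipelago: the per-connection collection of isles.
+ * When the gap in front of an isle is filled, its buffers are spliced either
+ * into the previous isle or into the ready_buffers_list for delivery (see
+ * qed_ooo_join_isles()).
+ */
+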
+struct qed_ooo_buffer {
+ struct list_head list_entry;
+ void *rx_buffer_virt_addr;
+ dma_addr_t rx_buffer_phys_addr;
+ u32 rx_buffer_size;
+ u16 packet_length;
+ u16 parse_flags;
+ u16 vlan;
+ u8 placement_offset;
+};
+
+struct qed_ooo_isle {
+ struct list_head list_entry;
+ struct list_head buffers_list;
+};
+
+struct qed_ooo_archipelago {
+ struct list_head isles_list;
+};
+
+struct qed_ooo_history {
+ struct ooo_opaque *p_cqes;
+ u32 head_idx;
+ u32 num_of_cqes;
+};
+
+struct qed_ooo_info {
+ struct list_head free_buffers_list;
+ struct list_head ready_buffers_list;
+ struct list_head free_isles_list;
+ struct qed_ooo_archipelago *p_archipelagos_mem;
+ struct qed_ooo_isle *p_isles_mem;
+ struct qed_ooo_history ooo_history;
+ u32 cur_isles_number;
+ u32 max_isles_number;
+ u32 gen_isles_number;
+ u16 max_num_archipelagos;
+ u16 cid_base;
+};
+
+#if IS_ENABLED(CONFIG_QED_OOO)
+void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct ooo_opaque *p_cqe);
+
+int qed_ooo_alloc(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_setup(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_free(struct qed_hwfn *p_hwfn);
+
+void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid);
+
+void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer);
+
+struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer, u8 on_tail);
+
+struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info);
+
+void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 drop_isle, u8 drop_size);
+
+void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid,
+ u8 ooo_isle, struct qed_ooo_buffer *p_buffer);
+
+void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid,
+ u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer, u8 buffer_side);
+
+void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid,
+ u8 left_isle);
+#else /* IS_ENABLED(CONFIG_QED_OOO) */
+static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct ooo_opaque *p_cqe) {}
+
+static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {}
+
+static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {}
+
+static inline void
+qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid) {}
+
+static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info)
+ {}
+
+static inline void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer) {}
+
+static inline struct qed_ooo_buffer *
+qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info) { return NULL; }
+
+static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ struct qed_ooo_buffer *p_buffer,
+ u8 on_tail) {}
+
+static inline struct qed_ooo_buffer *
+qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info) { return NULL; }
+
+static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 drop_isle, u8 drop_size) {}
+
+static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer) {}
+
+static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info,
+ u32 cid, u8 ooo_isle,
+ struct qed_ooo_buffer *p_buffer,
+ u8 buffer_side) {}
+
+static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
+ struct qed_ooo_info *p_ooo_info, u32 cid,
+ u8 left_isle) {}
+#endif /* IS_ENABLED(CONFIG_QED_OOO) */
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
new file mode 100644
index 000000000..295ce435a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hw.h"
+#include "qed_l2.h"
+#include "qed_mcp.h"
+#include "qed_ptp.h"
+#include "qed_reg_addr.h"
+
+/* 16-nanosecond time quanta to wait before making a drift adjustment */
+#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0
+/* Nanoseconds to add/subtract when making a drift adjustment */
+#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28
+/* Add/subtract the Adjustment_Value when making a drift adjustment */
+#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31
+#define QED_TIMESTAMP_MASK BIT(16)
+/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */
+#define QED_PTP_UCAST_PARAM_MASK 0x70F
+
+static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn)
+{
+ switch (MFW_PORT(p_hwfn)) {
+ case 0:
+ return QED_RESC_LOCK_PTP_PORT0;
+ case 1:
+ return QED_RESC_LOCK_PTP_PORT1;
+ case 2:
+ return QED_RESC_LOCK_PTP_PORT2;
+ case 3:
+ return QED_RESC_LOCK_PTP_PORT3;
+ default:
+ return QED_RESC_LOCK_RESC_INVALID;
+ }
+}
+
+static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_lock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(&params, NULL, resource, true);
+
+ rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &params);
+ if (rc && rc != -EINVAL) {
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* MFW doesn't support resource locking, first PF on the port
+ * has lock ownership.
+ */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
+ return 0;
+
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EBUSY;
+ } else if (!params.b_granted) {
+ DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_resc_unlock_params params;
+ enum qed_resc_lock resource;
+ int rc;
+
+ resource = qed_ptcdev_to_resc(p_hwfn);
+ if (resource == QED_RESC_LOCK_RESC_INVALID)
+ return -EINVAL;
+
+ qed_mcp_resc_lock_default_init(NULL, &params, resource, true);
+
+ rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
+ if (rc == -EINVAL) {
+ /* MFW doesn't support locking, first PF has lock ownership */
+ if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
+ rc = 0;
+ } else {
+ DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
+ return -EINVAL;
+ }
+ } else if (rc) {
+ DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n");
+ }
+
+ return rc;
+}
+
+/* Read Rx timestamp */
+static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 val;
+
+ *timestamp = 0;
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID);
+ if (!(val & QED_TIMESTAMP_MASK)) {
+ DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val);
+ return -EINVAL;
+ }
+
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB);
+ *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB);
+ *timestamp <<= 32;
+ *timestamp |= val;
+
+ /* Reset timestamp register to allow new timestamp */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+ QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+/* Read Tx timestamp */
+static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 val;
+
+ *timestamp = 0;
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID);
+ if (!(val & QED_TIMESTAMP_MASK)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_DEBUG,
+ "Invalid Tx timestamp, buf_seqid = %08x\n", val);
+ return -EINVAL;
+ }
+
+ val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB);
+ *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB);
+ *timestamp <<= 32;
+ *timestamp |= val;
+
+ /* Reset timestamp register to allow new timestamp */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+/* Read Phy Hardware Clock */
+static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 temp = 0;
+
+ temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB);
+ *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB);
+ *phc_cycles <<= 32;
+ *phc_cycles |= temp;
+
+ return 0;
+}
+
+/* Filter PTP protocol packets that need to be timestamped */
+static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev,
+ enum qed_ptp_filter_type rx_type,
+ enum qed_ptp_hwtstamp_tx_type tx_type)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 rule_mask, enable_cfg = 0x0;
+
+ switch (rx_type) {
+ case QED_PTP_FILTER_NONE:
+ enable_cfg = 0x0;
+ rule_mask = 0x3FFF;
+ break;
+ case QED_PTP_FILTER_ALL:
+ enable_cfg = 0x7;
+ rule_mask = 0x3CAA;
+ break;
+ case QED_PTP_FILTER_V1_L4_EVENT:
+ enable_cfg = 0x3;
+ rule_mask = 0x3FFA;
+ break;
+ case QED_PTP_FILTER_V1_L4_GEN:
+ enable_cfg = 0x3;
+ rule_mask = 0x3FFE;
+ break;
+ case QED_PTP_FILTER_V2_L4_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FAA;
+ break;
+ case QED_PTP_FILTER_V2_L4_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3FEE;
+ break;
+ case QED_PTP_FILTER_V2_L2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CFF;
+ break;
+ case QED_PTP_FILTER_V2_L2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EFF;
+ break;
+ case QED_PTP_FILTER_V2_EVENT:
+ enable_cfg = 0x5;
+ rule_mask = 0x3CAA;
+ break;
+ case QED_PTP_FILTER_V2_GEN:
+ enable_cfg = 0x5;
+ rule_mask = 0x3EEE;
+ break;
+ default:
+ DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type);
+ return -EINVAL;
+ }
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK,
+ QED_PTP_UCAST_PARAM_MASK);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg);
+
+ if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+ } else {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK,
+ QED_PTP_UCAST_PARAM_MASK);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask);
+ }
+
+ /* Reset possibly old timestamps */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+ QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units.
+ * FW/HW accepts the adjustment value in terms of 3 parameters:
+ * Drift period - the adjustment happens once in a given number of nanoseconds.
+ * Drift value - time is adjusted by a certain value, for example by 5 ns.
+ * Drift direction - add or subtract the adjustment value.
+ * The routine translates ppb into the adjustment triplet in an optimal manner.
+ */
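+/* As the search below implies, the hardware applies the adjustment value
+ * once every (16 * period + 8) nanoseconds, giving an effective rate of
+ * value * 10^9 / (16 * period + 8) ppb. For instance (illustrative numbers
+ * only), value = 1 ns with period = 625,000 quanta corresponds to
+ * 10^9 / 10,000,008, i.e. roughly 100 ppb. The chosen triplet is packed as
+ * period | (value << 28) | (direction << 31), per the shift definitions above.
+ */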
+static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb)
+{
+ s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+ u32 drift_ctr_cfg = 0, drift_state;
+ int drift_dir = 1;
+
+ if (ppb < 0) {
+ ppb = -ppb;
+ drift_dir = 0;
+ }
+
+ if (ppb > 1) {
+ s64 best_dif = ppb, best_approx_dev = 1;
+
+ /* Adjustment value is up to +/-7ns, find an optimal value in
+ * this range.
+ */
+ for (val = 7; val > 0; val--) {
+ period = div_s64(val * 1000000000, ppb);
+ period -= 8;
+ period >>= 4;
+ if (period < 1)
+ period = 1;
+ if (period > 0xFFFFFFE)
+ period = 0xFFFFFFE;
+
+ /* Check both rounding ends for approximate error */
+ approx_dev = period * 16 + 8;
+ dif = ppb * approx_dev - val * 1000000000;
+ dif2 = dif + 16 * ppb;
+
+ if (dif < 0)
+ dif = -dif;
+ if (dif2 < 0)
+ dif2 = -dif2;
+
+ /* Determine which end gives better approximation */
+ if (dif * (approx_dev + 16) > dif2 * approx_dev) {
+ period++;
+ approx_dev += 16;
+ dif = dif2;
+ }
+
+ /* Track best approximation found so far */
+ if (best_dif * approx_dev > dif * best_approx_dev) {
+ best_dif = dif;
+ best_val = val;
+ best_period = period;
+ best_approx_dev = approx_dev;
+ }
+ }
+ } else if (ppb == 1) {
+ /* This is a special case, as it is the only value that wouldn't
+ * fit in an s64 variable. To avoid casts, simply handle it
+ * separately.
+ */
+ best_val = 4;
+ best_period = 0xee6b27f;
+ } else {
+ best_val = 0;
+ best_period = 0xFFFFFFF;
+ }
+
+ drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) |
+ (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) |
+ (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1);
+
+ drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR);
+ if (drift_state & 1) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF,
+ drift_ctr_cfg);
+ } else {
+ DP_INFO(p_hwfn, "Drift counter is not reset\n");
+ return -EINVAL;
+ }
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+ return 0;
+}
+
+static int qed_ptp_hw_enable(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n");
+ return -EBUSY;
+ }
+
+ p_hwfn->p_ptp_ptt = p_ptt;
+
+ rc = qed_ptp_res_lock(p_hwfn, p_ptt);
+ if (rc) {
+ DP_INFO(p_hwfn,
+ "Couldn't acquire the resource lock, skip ptp enable for this PF\n");
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+ return rc;
+ }
+
+ /* Reset PTP event detection rules - will be configured in the IOCTL */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1);
+
+ /* Pause free running counter */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2);
+ if (QED_IS_AH(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0);
+ /* Resume free running counter */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4);
+ if (QED_IS_AH(p_hwfn->cdev)) {
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1);
+ }
+
+ /* Disable drift register */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0);
+
+ /* Reset possibly old timestamps */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID,
+ QED_TIMESTAMP_MASK);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK);
+
+ return 0;
+}
+
+static int qed_ptp_hw_disable(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt;
+
+ qed_ptp_res_unlock(p_hwfn, p_ptt);
+
+ /* Reset PTP event detection rules */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF);
+
+ /* Disable the PTP feature */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_ptp_ptt = NULL;
+
+ return 0;
+}
+
+const struct qed_eth_ptp_ops qed_ptp_ops_pass = {
+ .cfg_filters = qed_ptp_hw_cfg_filters,
+ .read_rx_ts = qed_ptp_hw_read_rx_ts,
+ .read_tx_ts = qed_ptp_hw_read_tx_ts,
+ .read_cc = qed_ptp_hw_read_cc,
+ .adjfreq = qed_ptp_hw_adjfreq,
+ .disable = qed_ptp_hw_disable,
+ .enable = qed_ptp_hw_enable,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
new file mode 100644
index 000000000..40a11c0e1
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2020 Marvell International Ltd. */
+
+#ifndef __QED_PTP_H
+#define __QED_PTP_H
+
+extern const struct qed_eth_ptp_ops qed_ptp_ops_pass;
+
+#endif /* __QED_PTP_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
new file mode 100644
index 000000000..5a5dbbb8d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -0,0 +1,2041 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <net/addrconf.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
+#include "qed_roce.h"
+#include "qed_sp.h"
+
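+/* The qed_bmap helpers below back the driver's RDMA ID spaces (PDs, XRCDs,
+ * DPIs, CQs, TIDs, CIDs, SRQs and so on). The bitmaps themselves carry no
+ * locking; callers in this file serialize allocation and release under
+ * p_rdma_info->lock (see, e.g., qed_rdma_alloc_tid()).
+ */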
+int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count);
+
+ bmap->max_count = max_count;
+
+ bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL);
+ if (!bmap->bitmap)
+ return -ENOMEM;
+
+ snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ return 0;
+}
+
+int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num)
+{
+ *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count);
+ if (*id_num >= bmap->max_count)
+ return -EINVAL;
+
+ __set_bit(*id_num, bmap->bitmap);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n",
+ bmap->name, *id_num);
+
+ return 0;
+}
+
+void qed_bmap_set_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return;
+
+ __set_bit(id_num, bmap->bitmap);
+}
+
+void qed_bmap_release_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ bool b_acquired;
+
+ if (id_num >= bmap->max_count)
+ return;
+
+ b_acquired = test_and_clear_bit(id_num, bmap->bitmap);
+ if (!b_acquired) {
+ DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n",
+ bmap->name, id_num);
+ return;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n",
+ bmap->name, id_num);
+}
+
+int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 id_num)
+{
+ if (id_num >= bmap->max_count)
+ return -1;
+
+ return test_bit(id_num, bmap->bitmap);
+}
+
+static bool qed_bmap_is_empty(struct qed_bmap *bmap)
+{
+ return bitmap_empty(bmap->bitmap, bmap->max_count);
+}
+
+static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
+{
+ /* First sb id for RoCE is after all the l2 sb */
+ return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id;
+}
+
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info;
+
+ p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL);
+ if (!p_rdma_info)
+ return -ENOMEM;
+
+ spin_lock_init(&p_rdma_info->lock);
+
+ p_hwfn->p_rdma_info = p_rdma_info;
+ return 0;
+}
+
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->p_rdma_info);
+ p_hwfn->p_rdma_info = NULL;
+}
+
+static int qed_rdma_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 num_cons, num_tasks;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n");
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ p_rdma_info->proto = PROTOCOLID_IWARP;
+ else
+ p_rdma_info->proto = PROTOCOLID_ROCE;
+
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto,
+ NULL);
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ p_rdma_info->num_qps = num_cons;
+ else
+ p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */
+
+ num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE);
+
+ /* Each MR uses a single task */
+ p_rdma_info->num_mrs = num_tasks;
+
+ /* Queue zone lines are shared between RoCE and L2 in such a way that
+ * they can be used by each without obstructing the other.
+ */
+ p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE);
+
+ /* Allocate a struct with device params and fill it */
+ p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL);
+ if (!p_rdma_info->dev)
+ return rc;
+
+ /* Allocate a struct with port params and fill it */
+ p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL);
+ if (!p_rdma_info->port)
+ goto free_rdma_dev;
+
+ /* Allocate bit map for pd's */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS,
+ "PD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate pd_map, rc = %d\n",
+ rc);
+ goto free_rdma_port;
+ }
+
+ /* Allocate bit map for XRC Domains */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map,
+ QED_RDMA_MAX_XRCDS, "XRCD");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate xrcd_map,rc = %d\n", rc);
+ goto free_pd_map;
+ }
+
+ /* Allocate DPI bitmap */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map,
+ p_hwfn->dpi_count, "DPI");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate DPI bitmap, rc = %d\n", rc);
+ goto free_xrcd_map;
+ }
+
+ /* Allocate bitmap for cq's. The maximum number of CQs is bound to
+ * the number of connections we support. (num_qps in iWARP or
+ * num_qps/2 in RoCE).
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cq bitmap, rc = %d\n", rc);
+ goto free_dpi_map;
+ }
+
+ /* Allocate bitmap for toggle bit for cq icids
+ * We toggle the bit every time we create or resize cq for a given icid.
+ * Size needs to equal the size of the cq bmap.
+ */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits,
+ num_cons, "Toggle");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate toggle bits, rc = %d\n", rc);
+ goto free_cq_map;
+ }
+
+ /* Allocate bitmap for itids */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map,
+ p_rdma_info->num_mrs, "MR");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate itids bitmaps, rc = %d\n", rc);
+ goto free_toggle_map;
+ }
+
+ /* Allocate bitmap for cids used for qps. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons,
+ "CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate cid bitmap, rc = %d\n", rc);
+ goto free_tid_map;
+ }
+
+ /* Allocate bitmap for cids used for responders/requesters. */
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons,
+ "REAL_CID");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate real cid bitmap, rc = %d\n", rc);
+ goto free_cid_map;
+ }
+
+ /* The first SRQ follows the last XRC SRQ. This means that the
+ * SRQ IDs start from an offset equal to max_xrc_srqs.
+ */
+ p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count;
+ rc = qed_rdma_bmap_alloc(p_hwfn,
+ &p_rdma_info->xrc_srq_map,
+ p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate xrc srq bitmap, rc = %d\n", rc);
+ goto free_real_cid_map;
+ }
+
+ /* Allocate bitmap for srqs */
+ p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count;
+ rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map,
+ p_rdma_info->num_srqs, "SRQ");
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to allocate srq bitmap, rc = %d\n", rc);
+ goto free_xrc_srq_map;
+ }
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ rc = qed_iwarp_alloc(p_hwfn);
+
+ if (rc)
+ goto free_srq_map;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n");
+ return 0;
+
+free_srq_map:
+ kfree(p_rdma_info->srq_map.bitmap);
+free_xrc_srq_map:
+ kfree(p_rdma_info->xrc_srq_map.bitmap);
+free_real_cid_map:
+ kfree(p_rdma_info->real_cid_map.bitmap);
+free_cid_map:
+ kfree(p_rdma_info->cid_map.bitmap);
+free_tid_map:
+ kfree(p_rdma_info->tid_map.bitmap);
+free_toggle_map:
+ kfree(p_rdma_info->toggle_bits.bitmap);
+free_cq_map:
+ kfree(p_rdma_info->cq_map.bitmap);
+free_dpi_map:
+ kfree(p_rdma_info->dpi_map.bitmap);
+free_xrcd_map:
+ kfree(p_rdma_info->xrcd_map.bitmap);
+free_pd_map:
+ kfree(p_rdma_info->pd_map.bitmap);
+free_rdma_port:
+ kfree(p_rdma_info->port);
+free_rdma_dev:
+ kfree(p_rdma_info->dev);
+
+ return rc;
+}
+
+void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, bool check)
+{
+ unsigned int bit, weight, nbits;
+ unsigned long *b;
+
+ if (!check)
+ goto end;
+
+ weight = bitmap_weight(bmap->bitmap, bmap->max_count);
+ if (!weight)
+ goto end;
+
+ DP_NOTICE(p_hwfn,
+ "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n",
+ bmap->name, bmap->max_count, weight);
+
+ for (bit = 0; bit < bmap->max_count; bit += 512) {
+ b = bmap->bitmap + BITS_TO_LONGS(bit);
+ nbits = min(bmap->max_count - bit, 512U);
+
+ if (!bitmap_empty(b, nbits))
+ DP_NOTICE(p_hwfn,
+ "line 0x%04x: %*pb\n", bit / 512, nbits, b);
+ }
+
+end:
+ bitmap_free(bmap->bitmap);
+ bmap->bitmap = NULL;
+}
+
+static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_resc_free(p_hwfn);
+
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1);
+ qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1);
+
+ kfree(p_rdma_info->port);
+ kfree(p_rdma_info->dev);
+}
+
+static void qed_rdma_free_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn)
+{
+ qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey);
+}
+
+static void qed_rdma_free(struct qed_hwfn *p_hwfn)
+{
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
+
+ qed_rdma_free_reserved_lkey(p_hwfn);
+ qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
+ qed_rdma_resc_free(p_hwfn);
+}
+
+static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_events *events;
+
+ events = &p_hwfn->p_rdma_info->events;
+
+ events->unaffiliated_event = params->events->unaffiliated_event;
+ events->affiliated_event = params->events->affiliated_event;
+ events->context = params->events->context;
+}
+
+static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+ struct qed_dev *cdev = p_hwfn->cdev;
+ u32 pci_status_control;
+ u32 num_qps;
+
+ /* Vendor specific information */
+ dev->vendor_id = cdev->vendor_id;
+ dev->vendor_part_id = cdev->device_id;
+ dev->hw_ver = cdev->chip_rev;
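+ /* fw_ver packs major.minor.revision.engineering, one byte each; a
+ * hypothetical 8.59.1.0 firmware, for example, would be reported as
+ * 0x083b0100.
+ */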
+ dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
+ (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
+
+ addrconf_addr_eui48((u8 *)&dev->sys_image_guid,
+ p_hwfn->hw_info.hw_mac_addr);
+
+ dev->node_guid = dev->sys_image_guid;
+
+ dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
+ RDMA_MAX_SGE_PER_RQ_WQE);
+
+ if (cdev->rdma_max_sge)
+ dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge);
+
+ dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE;
+ if (p_hwfn->cdev->rdma_max_srq_sge) {
+ dev->max_srq_sge = min_t(u32,
+ p_hwfn->cdev->rdma_max_srq_sge,
+ dev->max_srq_sge);
+ }
+ dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE;
+
+ dev->max_inline = (cdev->rdma_max_inline) ?
+ min_t(u32, cdev->rdma_max_inline, dev->max_inline) :
+ dev->max_inline;
+
+ dev->max_wqe = QED_RDMA_MAX_WQE;
+ dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
+
+ /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because
+ * it is up-aligned to 16 and then to ILT page size within qed cxt.
+ * This is OK in terms of ILT but we don't want to configure the FW
+ * above its abilities.
+ */
+ num_qps = ROCE_MAX_QPS;
+ num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);
+ dev->max_qp = num_qps;
+
+ /* CQs use the same icids that QPs use; hence they are limited by the
+ * number of icids. There are two icids per QP.
+ */
+ dev->max_cq = num_qps * 2;
+
+ /* The number of mrs is smaller by 1 since the first is reserved */
+ dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1;
+ dev->max_mr_size = QED_RDMA_MAX_MR_SIZE;
+
+ /* The maximum CQE capacity per CQ supported.
+ * The maximum number of CQEs is reached with a two-level PBL,
+ * where 8 is the pointer size in bytes and 32 is the size of a
+ * CQ element in bytes.
+ */
+ if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS)
+ dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT;
+ else
+ dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT;
+
+ dev->max_mw = 0;
+ dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8);
+ dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE;
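+ /* With 4 KiB pages (an assumption; PAGE_SIZE is arch-dependent) this is
+ * (4096 / 8)^2 = 262,144 PBL entries, i.e. a 1 GiB maximum MR/MW/FMR size.
+ */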
+ if (QED_IS_ROCE_PERSONALITY(p_hwfn))
+ dev->max_pkey = QED_RDMA_MAX_P_KEY;
+
+ dev->max_srq = p_hwfn->p_rdma_info->num_srqs;
+ dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM;
+ dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2);
+ dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE /
+ RDMA_REQ_RD_ATOMIC_ELM_SIZE;
+ dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc *
+ p_hwfn->p_rdma_info->num_qps;
+ dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS;
+ dev->dev_ack_delay = QED_RDMA_ACK_DELAY;
+ dev->max_pd = RDMA_MAX_PDS;
+ dev->max_ah = p_hwfn->p_rdma_info->num_qps;
+ dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE);
+
+ /* Set capabilities */
+ dev->dev_caps = 0;
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1);
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1);
+
+ /* Check atomic operations support in PCI configuration space. */
+ pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2,
+ &pci_status_control);
+
+ if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN)
+ SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1);
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_init_devinfo(p_hwfn);
+}
+
+static void qed_rdma_init_port(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_port *port = p_hwfn->p_rdma_info->port;
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ port->port_state = p_hwfn->mcp_info->link_output.link_up ?
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN;
+
+ port->max_msg_size = min_t(u64,
+ (dev->max_mr_mw_fmr_size *
+ p_hwfn->cdev->rdma_max_sge),
+ BIT(31));
+
+ port->pkey_bad_counter = 0;
+}
+
+static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n");
+ p_hwfn->b_rdma_enabled_in_prs = false;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_init_hw(p_hwfn, p_ptt);
+ else
+ rc = qed_roce_init_hw(p_hwfn, p_ptt);
+
+ return rc;
+}
+
+static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_start_in_params *params,
+ struct qed_ptt *p_ptt)
+{
+ struct rdma_init_func_ramrod_data *p_ramrod;
+ struct qed_rdma_cnq_params *p_cnq_pbl_list;
+ struct rdma_init_func_hdr *p_params_header;
+ struct rdma_cnq_params *p_cnq_params;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 cnq_id, sb_id;
+ u16 igu_sb_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n");
+
+ /* Save the number of cnqs for the function close ramrod */
+ p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ qed_iwarp_init_fw_ramrod(p_hwfn,
+ &p_ent->ramrod.iwarp_init_func);
+ p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
+ } else {
+ p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
+ }
+
+ p_params_header = &p_ramrod->params_header;
+ p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn,
+ QED_RDMA_CNQ_RAM);
+ p_params_header->num_cnqs = params->desired_cnq;
+ p_params_header->first_reg_srq_id =
+ cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset);
+ p_params_header->reg_srq_base_addr =
+ cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM));
+ if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS)
+ p_params_header->cq_ring_mode = 1;
+ else
+ p_params_header->cq_ring_mode = 0;
+
+ for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) {
+ sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id);
+ igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id);
+ p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id);
+ p_cnq_params = &p_ramrod->cnq_params[cnq_id];
+ p_cnq_pbl_list = &params->cnq_pbl_list[cnq_id];
+
+ p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi;
+ p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages;
+
+ DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr,
+ p_cnq_pbl_list->pbl_ptr);
+
+ /* we assume here that cnq_id and qz_offset are the same */
+ p_cnq_params->queue_zone_num =
+ cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base +
+ cnq_id);
+ }
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->tid_map, itid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc)
+ goto out;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid);
+out:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn)
+{
+ struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev;
+
+ /* Tid 0 will be used as the key for "reserved MR".
+ * The driver should allocate memory for it so it can be loaded but no
+ * ramrod should be passed on it.
+ */
+ qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey);
+ if (dev->reserved_lkey != RDMA_RESERVED_LKEY) {
+ DP_NOTICE(p_hwfn,
+ "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_rdma_start_in_params *params)
+{
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n");
+
+ qed_rdma_init_devinfo(p_hwfn, params);
+ qed_rdma_init_port(p_hwfn);
+ qed_rdma_init_events(p_hwfn, params);
+
+ rc = qed_rdma_reserve_lkey(p_hwfn);
+ if (rc)
+ return rc;
+
+ rc = qed_rdma_init_hw(p_hwfn, p_ptt);
+ if (rc)
+ return rc;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ rc = qed_iwarp_setup(p_hwfn, params);
+ if (rc)
+ return rc;
+ } else {
+ rc = qed_roce_setup(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
+ return qed_rdma_start_fw(p_hwfn, params, p_ptt);
+}
+
+static int qed_rdma_stop(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_close_func_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u32 ll2_ethertype_en;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n");
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ /* Disable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0);
+ p_hwfn->b_rdma_enabled_in_prs = false;
+ p_hwfn->p_rdma_info->active = 0;
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en & 0xFFFE));
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ rc = qed_iwarp_stop(p_hwfn);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+ } else {
+ qed_roce_stop(p_hwfn);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Stop RoCE */
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto out;
+
+ p_ramrod = &p_ent->ramrod.rdma_close_func;
+
+ p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs;
+ p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+out:
+ qed_rdma_free(p_hwfn);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_add_user(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 dpi_start_offset;
+ u32 returned_id = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n");
+
+ /* Allocate DPI */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ out_params->dpi = (u16)returned_id;
+
+ /* Calculate the corresponding DPI address */
+ dpi_start_offset = p_hwfn->dpi_start_offset;
+
+ out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset +
+ out_params->dpi * p_hwfn->dpi_size;
+
+ out_params->dpi_phys_addr = p_hwfn->db_phys_addr +
+ dpi_start_offset +
+ ((out_params->dpi) * p_hwfn->dpi_size);
+
+ out_params->dpi_size = p_hwfn->dpi_size;
+ out_params->wid_count = p_hwfn->wid_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc);
+ return rc;
+}
+
+static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port;
+ struct qed_mcp_link_state *p_link_output;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n");
+
+ /* The link state is saved only for the leading hwfn */
+ p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
+ p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP
+ : QED_RDMA_PORT_DOWN;
+
+ p_port->link_speed = p_link_output->speed;
+
+ p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE;
+
+ return p_port;
+}
+
+static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n");
+
+ /* Return struct with device parameters */
+ return p_hwfn->p_rdma_info->dev;
+}
+
+static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
+{
+ struct qed_hwfn *p_hwfn;
+ u16 qz_num;
+ u32 addr;
+
+ p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) {
+ DP_NOTICE(p_hwfn,
+ "queue zone offset %d is too large (max is %d)\n",
+ qz_offset, p_hwfn->p_rdma_info->max_queue_zones);
+ return;
+ }
+
+ qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_COMMON_QUEUE_CONS, qz_num);
+
+ REG_WR16(p_hwfn, addr, prod);
+
+ /* keep prod updates ordered */
+ wmb();
+}
+
+static int qed_fill_rdma_dev_info(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info)
+{
+ struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
+
+ memset(info, 0, sizeof(*info));
+
+ info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ?
+ QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP;
+
+ info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0);
+
+ qed_fill_dev_info(cdev, &info->common);
+
+ return 0;
+}
+
+static int qed_rdma_get_sb_start(struct qed_dev *cdev)
+{
+ int feat_num;
+
+ if (cdev->num_hwfns > 1)
+ feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE);
+ else
+ feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) *
+ cdev->num_hwfns;
+
+ return feat_num;
+}
+
+static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev)
+{
+ int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ);
+ int n_msix = cdev->int_params.rdma_msix_cnt;
+
+ return min_t(int, n_cnq, n_msix);
+}
+
+static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt)
+{
+ int limit = 0;
+
+ /* Mark the fastpath as free/used */
+ cdev->int_params.fp_initialized = cnt ? true : false;
+
+ if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) {
+ DP_ERR(cdev,
+ "qed roce supports only MSI-X interrupts (detected %d).\n",
+ cdev->int_params.out.int_mode);
+ return -EINVAL;
+ } else if (cdev->int_params.fp_msix_cnt) {
+ limit = cdev->int_params.rdma_msix_cnt;
+ }
+
+ if (!limit)
+ return -ENOMEM;
+
+ return min_t(int, cnt, limit);
+}
+
+static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info)
+{
+ memset(info, 0, sizeof(*info));
+
+ if (!cdev->int_params.fp_initialized) {
+ DP_INFO(cdev,
+ "Protocol driver requested interrupt information, but its support is not yet configured\n");
+ return -EINVAL;
+ }
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ int msix_base = cdev->int_params.rdma_msix_base;
+
+ info->msix_cnt = cdev->int_params.rdma_msix_cnt;
+ info->msix = &cdev->int_params.msix_table[msix_base];
+
+ DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n",
+ info->msix_cnt, msix_base);
+ }
+
+ return 0;
+}
+
+static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n");
+
+ /* Allocates an unused protection domain */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->pd_map, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ *pd = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_pd(void *rdma_cxt, u16 pd)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd);
+
+ /* Returns a previously allocated protection domain for reuse */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ u32 returned_id;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n");
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn,
+ &p_hwfn->p_rdma_info->xrcd_map,
+ &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n");
+ return rc;
+ }
+
+ *xrcd_id = (u16)returned_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc);
+ return rc;
+}
+
+static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static enum qed_rdma_toggle_bit
+qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ enum qed_rdma_toggle_bit toggle_bit;
+ u32 bmap_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid);
+
+ /* The function toggles the bit that is related to a given icid
+ * and returns the new toggle bit's value.
+ */
+ bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto);
+
+ spin_lock_bh(&p_info->lock);
+ toggle_bit = !test_and_change_bit(bmap_id,
+ p_info->toggle_bits.bitmap);
+ spin_unlock_bh(&p_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QED_RDMA_TOGGLE_BIT_= %d\n",
+ toggle_bit);
+
+ return toggle_bit;
+}
+
+static int qed_rdma_create_cq(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_info *p_info = p_hwfn->p_rdma_info;
+ struct rdma_create_cq_ramrod_data *p_ramrod;
+ enum qed_rdma_toggle_bit toggle_bit;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u32 returned_id, start_cid;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n",
+ params->cq_handle_hi, params->cq_handle_lo);
+
+ /* Allocate icid */
+ spin_lock_bh(&p_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id);
+ spin_unlock_bh(&p_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc);
+ return rc;
+ }
+
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn,
+ p_info->proto);
+ *icid = returned_id + start_cid;
+
+ /* Check if icid requires a page allocation */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid);
+ if (rc)
+ goto err;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = *icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ /* Send create CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_CQ,
+ p_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_cq;
+
+ p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi);
+ p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo);
+ p_ramrod->dpi = cpu_to_le16(params->dpi);
+ p_ramrod->is_two_level_pbl = params->pbl_two_level;
+ p_ramrod->max_cqes = cpu_to_le32(params->cq_size);
+ DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr);
+ p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages);
+ p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) +
+ params->cnq_id;
+ p_ramrod->int_timeout = cpu_to_le16(params->int_timeout);
+
+ /* toggle the bit for every resize or create cq for a given icid */
+ toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+
+ p_ramrod->toggle_bit = toggle_bit;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc) {
+ /* restore toggle bit */
+ qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc);
+ return rc;
+
+err:
+ /* release allocated icid */
+ spin_lock_bh(&p_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id);
+ spin_unlock_bh(&p_info->lock);
+ DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc);
+
+ return rc;
+}
+
+static int
+qed_rdma_destroy_cq(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *in_params,
+ struct qed_rdma_destroy_cq_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_destroy_cq_output_params *p_ramrod_res;
+ struct rdma_destroy_cq_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ enum protocol_type proto;
+ int rc = -ENOMEM;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
+
+ p_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy cq failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = in_params->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ proto = p_hwfn->p_rdma_info->proto;
+ /* Send destroy CQ ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_CQ,
+ proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_cq;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ /* Free icid */
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn,
+ &p_hwfn->p_rdma_info->cq_map,
+ (in_params->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto)));
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc);
+ return rc;
+
+err: dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct rdma_destroy_cq_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
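+/* Pack a 6-byte MAC address into the three 16-bit words the FW expects,
+ * each word holding two consecutive MAC bytes with the first byte in the
+ * high-order position. For example, 00:11:22:33:44:55 becomes the words
+ * 0x0011, 0x2233 and 0x4455, each then converted to little-endian for the
+ * FW structure.
+ */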
+void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac)
+{
+ p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]);
+ p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]);
+ p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]);
+}
+
+static int qed_rdma_query_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* The following fields are filled in from the qp struct and not from
+ * the FW, as they cannot be modified by the FW.
+ */
+ out_params->mtu = qp->mtu;
+ out_params->dest_qp = qp->dest_qp;
+ out_params->incoming_atomic_en = qp->incoming_atomic_en;
+ out_params->e2e_flow_control_en = qp->e2e_flow_control_en;
+ out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en;
+ out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en;
+ out_params->dgid = qp->dgid;
+ out_params->flow_label = qp->flow_label;
+ out_params->hop_limit_ttl = qp->hop_limit_ttl;
+ out_params->traffic_class_tos = qp->traffic_class_tos;
+ out_params->timeout = qp->ack_timeout;
+ out_params->rnr_retry = qp->rnr_retry_cnt;
+ out_params->retry_cnt = qp->retry_cnt;
+ out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer;
+ out_params->pkey_index = 0;
+ out_params->max_rd_atomic = qp->max_rd_atomic_req;
+ out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp;
+ out_params->sqd_async = qp->sqd_async;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ qed_iwarp_query_qp(qp, out_params);
+ else
+ rc = qed_roce_query_qp(p_hwfn, qp, out_params);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn))
+ rc = qed_iwarp_destroy_qp(p_hwfn, qp);
+ else
+ rc = qed_roce_destroy_qp(p_hwfn, qp);
+
+ /* free qp params struct */
+ kfree(qp);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n");
+ return rc;
+}
+
+static struct qed_rdma_qp *
+qed_rdma_create_qp(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *in_params,
+ struct qed_rdma_create_qp_out_params *out_params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_rdma_qp *qp;
+ u8 max_stats_queues;
+ int rc;
+
+ if (!rdma_cxt || !in_params || !out_params ||
+ !p_hwfn->p_rdma_info->active) {
+ pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n",
+ rdma_cxt, in_params, out_params);
+ return NULL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "qed rdma create qp called with qp_handle = %08x%08x\n",
+ in_params->qp_handle_hi, in_params->qp_handle_lo);
+
+ /* Some sanity checks... */
+ max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues;
+ if (in_params->stats_queue >= max_stats_queues) {
+ DP_ERR(p_hwfn->cdev,
+ "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n",
+ in_params->stats_queue, max_stats_queues);
+ return NULL;
+ }
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ if (in_params->sq_num_pages * sizeof(struct regpair) >
+ IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Sq num pages: %d exceeds maximum\n",
+ in_params->sq_num_pages);
+ return NULL;
+ }
+ if (in_params->rq_num_pages * sizeof(struct regpair) >
+ IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Rq num pages: %d exceeds maximum\n",
+ in_params->rq_num_pages);
+ return NULL;
+ }
+ }
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return NULL;
+
+ qp->cur_state = QED_ROCE_QP_STATE_RESET;
+ qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi);
+ qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo);
+ qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi);
+ qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo);
+ qp->use_srq = in_params->use_srq;
+ qp->signal_all = in_params->signal_all;
+ qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey;
+ qp->pd = in_params->pd;
+ qp->dpi = in_params->dpi;
+ qp->sq_cq_id = in_params->sq_cq_id;
+ qp->sq_num_pages = in_params->sq_num_pages;
+ qp->sq_pbl_ptr = in_params->sq_pbl_ptr;
+ qp->rq_cq_id = in_params->rq_cq_id;
+ qp->rq_num_pages = in_params->rq_num_pages;
+ qp->rq_pbl_ptr = in_params->rq_pbl_ptr;
+ qp->srq_id = in_params->srq_id;
+ qp->req_offloaded = false;
+ qp->resp_offloaded = false;
+ qp->e2e_flow_control_en = qp->use_srq ? false : true;
+ qp->stats_queue = in_params->stats_queue;
+ qp->qp_type = in_params->qp_type;
+ qp->xrcd_id = in_params->xrcd_id;
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ rc = qed_iwarp_create_qp(p_hwfn, qp, out_params);
+ qp->qpid = qp->icid;
+ } else {
+ qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE);
+ rc = qed_roce_alloc_cid(p_hwfn, &qp->icid);
+ qp->qpid = ((0xFF << 16) | qp->icid);
+ }
+
+ if (rc) {
+ kfree(qp);
+ return NULL;
+ }
+
+ out_params->icid = qp->icid;
+ out_params->qp_id = qp->qpid;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc);
+ return qp;
+}
+
+static int qed_rdma_modify_qp(void *rdma_cxt,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ enum qed_roce_qp_state prev_state;
+ int rc = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n",
+ qp->icid, params->new_state);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) {
+ qp->incoming_rdma_read_en = params->incoming_rdma_read_en;
+ qp->incoming_rdma_write_en = params->incoming_rdma_write_en;
+ qp->incoming_atomic_en = params->incoming_atomic_en;
+ }
+
+ /* Update QP structure with the updated values */
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE))
+ qp->roce_mode = params->roce_mode;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY))
+ qp->pkey = params->pkey;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN))
+ qp->e2e_flow_control_en = params->e2e_flow_control_en;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP))
+ qp->dest_qp = params->dest_qp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) {
+ /* Indicates that the following parameters have changed:
+ * Traffic class, flow label, hop limit, source GID,
+ * destination GID, loopback indicator
+ */
+ qp->traffic_class_tos = params->traffic_class_tos;
+ qp->flow_label = params->flow_label;
+ qp->hop_limit_ttl = params->hop_limit_ttl;
+
+ qp->sgid = params->sgid;
+ qp->dgid = params->dgid;
+ qp->udp_src_port = 0;
+ qp->vlan_id = params->vlan_id;
+ qp->mtu = params->mtu;
+ qp->lb_indication = params->lb_indication;
+ memcpy((u8 *)&qp->remote_mac_addr[0],
+ (u8 *)&params->remote_mac_addr[0], ETH_ALEN);
+ if (params->use_local_mac) {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&params->local_mac_addr[0], ETH_ALEN);
+ } else {
+ memcpy((u8 *)&qp->local_mac_addr[0],
+ (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+ }
+ }
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN))
+ qp->rq_psn = params->rq_psn;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN))
+ qp->sq_psn = params->sq_psn;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ))
+ qp->max_rd_atomic_req = params->max_rd_atomic_req;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP))
+ qp->max_rd_atomic_resp = params->max_rd_atomic_resp;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT))
+ qp->ack_timeout = params->ack_timeout;
+ if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT))
+ qp->retry_cnt = params->retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT))
+ qp->rnr_retry_cnt = params->rnr_retry_cnt;
+ if (GET_FIELD(params->modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER))
+ qp->min_rnr_nak_timer = params->min_rnr_nak_timer;
+
+ qp->sqd_async = params->sqd_async;
+
+ prev_state = qp->cur_state;
+ if (GET_FIELD(params->modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) {
+ qp->cur_state = params->new_state;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n",
+ qp->cur_state);
+ }
+
+ switch (qp->qp_type) {
+ case QED_RDMA_QP_TYPE_XRC_INI:
+ qp->has_req = true;
+ break;
+ case QED_RDMA_QP_TYPE_XRC_TGT:
+ qp->has_resp = true;
+ break;
+ default:
+ qp->has_req = true;
+ qp->has_resp = true;
+ }
+
+ if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
+ enum qed_iwarp_qp_state new_state =
+ qed_roce2iwarp_state(qp->cur_state);
+
+ rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0);
+ } else {
+ rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc);
+ return rc;
+}
+
+static int
+qed_rdma_register_tid(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_register_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum rdma_tid_type tid_type;
+ u8 fw_return_code;
+ u16 flags = 0;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (p_hwfn->p_rdma_info->last_tid < params->itid)
+ p_hwfn->p_rdma_info->last_tid = params->itid;
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL,
+ params->pbl_two_level);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED,
+ false);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr);
+
+ /* Don't initialize D/C field, as it may override other bits. */
+ if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr))
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
+ params->page_size_log - 12);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ,
+ params->remote_read);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE,
+ params->remote_write);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC,
+ params->remote_atomic);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE,
+ params->local_write);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ,
+ params->local_read);
+
+ SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND,
+ params->mw_bind);
+
+ p_ramrod = &p_ent->ramrod.rdma_register_tid;
+ p_ramrod->flags = cpu_to_le16(flags);
+
+ SET_FIELD(p_ramrod->flags1,
+ RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG,
+ params->pbl_page_size_log - 12);
+
+ SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR,
+ params->dma_mr);
+
+ switch (params->tid_type) {
+ case QED_RDMA_TID_REGISTERED_MR:
+ tid_type = RDMA_TID_REGISTERED_MR;
+ break;
+ case QED_RDMA_TID_FMR:
+ tid_type = RDMA_TID_FMR;
+ break;
+ case QED_RDMA_TID_MW:
+ tid_type = RDMA_TID_MW;
+ break;
+ default:
+ rc = -EINVAL;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
+ }
+
+ SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE,
+ tid_type);
+
+ p_ramrod->itid = cpu_to_le32(params->itid);
+ p_ramrod->key = params->key;
+ p_ramrod->pd = cpu_to_le16(params->pd);
+ p_ramrod->length_hi = (u8)(params->length >> 32);
+ p_ramrod->length_lo = DMA_LO_LE(params->length);
+ DMA_REGPAIR_LE(p_ramrod->va, params->vaddr);
+ DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr);
+
+ /* DIF */
+ if (params->dif_enabled) {
+ SET_FIELD(p_ramrod->flags2,
+ RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
+ DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
+ params->dif_error_addr);
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc)
+ return rc;
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct rdma_deregister_tid_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ struct qed_ptt *p_ptt;
+ u8 fw_return_code;
+ int rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid);
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
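+ /* The deregister ramrod only needs the TID to invalidate */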
+ p_ramrod = &p_ent->ramrod.rdma_deregister_tid;
+ p_ramrod->itid = cpu_to_le32(itid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc);
+ return rc;
+ }
+
+ if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code);
+ return -EINVAL;
+ } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) {
+ /* The return code indicates the TID is still in use and a NIG
+ * drain is required before resending the ramrod
+ */
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ rc = -EBUSY;
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to acquire PTT\n");
+ return rc;
+ }
+
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Drain failed\n");
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ /* Resend the ramrod */
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DEREGISTER_MR,
+ p_hwfn->p_rdma_info->proto,
+ &init_data);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Failed to init sp-element\n");
+ return rc;
+ }
+
+ rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
+ if (rc) {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Ramrod failed\n");
+ return rc;
+ }
+
+ if (fw_return_code != RDMA_RETURN_OK) {
+ DP_NOTICE(p_hwfn, "fw_return_code = %d\n",
+ fw_return_code);
+ return rc;
+ }
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc);
+ return rc;
+}
+
+static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
+{
+ return QED_AFFIN_HWFN(cdev);
+}
+
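+/* XRC SRQs and regular SRQs are tracked in separate bitmaps */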
+static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn,
+ bool is_xrc)
+{
+ if (is_xrc)
+ return &p_hwfn->p_rdma_info->xrc_srq_map;
+
+ return &p_hwfn->p_rdma_info->srq_map;
+}
+
+static int qed_rdma_modify_srq(void *rdma_cxt,
+ struct qed_rdma_modify_srq_in_params *in_params)
+{
+ struct rdma_srq_modify_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data = {};
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_spq_entry *p_ent;
+ u16 opaque_fid;
+ int rc;
+
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_MODIFY_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rdma_modify_srq;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+ p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n",
+ in_params->srq_id, in_params->is_xrc);
+
+ return rc;
+}
+
+static int
+qed_rdma_destroy_srq(void *rdma_cxt,
+ struct qed_rdma_destroy_srq_in_params *in_params)
+{
+ struct rdma_srq_destroy_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data = {};
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ struct qed_spq_entry *p_ent;
+ struct qed_bmap *bmap;
+ u16 opaque_fid;
+ u16 offset;
+ int rc;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_DESTROY_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rdma_destroy_srq;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
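+ /* Return the SRQ id to its bitmap; non-XRC ids were offset by
+ * srq_id_offset on creation
+ */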
+ bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
+ offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "XRC/SRQ destroyed Id = %x, is_xrc=%u\n",
+ in_params->srq_id, in_params->is_xrc);
+
+ return rc;
+}
+
+static int
+qed_rdma_create_srq(void *rdma_cxt,
+ struct qed_rdma_create_srq_in_params *in_params,
+ struct qed_rdma_create_srq_out_params *out_params)
+{
+ struct rdma_srq_create_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data = {};
+ struct qed_hwfn *p_hwfn = rdma_cxt;
+ enum qed_cxt_elem_type elem_type;
+ struct qed_spq_entry *p_ent;
+ u16 opaque_fid, srq_id;
+ struct qed_bmap *bmap;
+ u32 returned_id;
+ u16 offset;
+ int rc;
+
+ bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc);
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "failed to allocate xrc/srq id (is_xrc=%u)\n",
+ in_params->is_xrc);
+ return rc;
+ }
+
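+ /* Allocate the ILT line for the new (XRC-)SRQ context before
+ * sending the ramrod
+ */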
+ elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ);
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id);
+ if (rc)
+ goto err;
+
+ opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.opaque_fid = opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ RDMA_RAMROD_CREATE_SRQ,
+ p_hwfn->p_rdma_info->proto, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.rdma_create_srq;
+ DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr);
+ p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages);
+ p_ramrod->pd_id = cpu_to_le16(in_params->pd_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid);
+ p_ramrod->page_size = cpu_to_le16(in_params->page_size);
+ DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr);
+ offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset;
+ srq_id = (u16)returned_id + offset;
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id);
+
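+ /* XRC SRQs additionally carry the bound CQ CID and the XRC domain */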
+ if (in_params->is_xrc) {
+ SET_FIELD(p_ramrod->flags,
+ RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1);
+ SET_FIELD(p_ramrod->flags,
+ RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN,
+ in_params->reserved_key_en);
+ p_ramrod->xrc_srq_cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ in_params->cq_cid);
+ p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id);
+ }
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ out_params->srq_id = srq_id;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_RDMA,
+ "XRC/SRQ created Id = %x (is_xrc=%u)\n",
+ out_params->srq_id, in_params->is_xrc);
+ return rc;
+
+err:
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, bmap, returned_id);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+
+ return rc;
+}
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
+{
+ bool result;
+
+ /* If RDMA wasn't activated yet, there naturally are no QPs */
+ if (!p_hwfn->p_rdma_info->active)
+ return false;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ if (!p_hwfn->p_rdma_info->cid_map.bitmap)
+ result = false;
+ else
+ result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+ return result;
+}
+
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 val;
+
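+ /* EDPM stays enabled only if neither DCBX nor the doorbell BAR
+ * size disqualify it
+ */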
+ val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 0 : 1;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val);
+ DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA),
+ "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n",
+ val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
+}
+
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ p_hwfn->db_bar_no_edpm = true;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
+static int qed_rdma_start(void *rdma_cxt,
+ struct qed_rdma_start_in_params *params)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+ struct qed_ptt *p_ptt;
+ int rc = -EBUSY;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "desired_cnq = %08x\n", params->desired_cnq);
+
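+ /* Bring-up: acquire a PTT, allocate the RDMA info structures and
+ * then configure HW/FW
+ */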
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ goto err;
+
+ rc = qed_rdma_alloc(p_hwfn);
+ if (rc)
+ goto err1;
+
+ rc = qed_rdma_setup(p_hwfn, p_ptt, params);
+ if (rc)
+ goto err2;
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ p_hwfn->p_rdma_info->active = 1;
+
+ return rc;
+
+err2:
+ qed_rdma_free(p_hwfn);
+err1:
+ qed_ptt_release(p_hwfn, p_ptt);
+err:
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_rdma_init(struct qed_dev *cdev,
+ struct qed_rdma_start_in_params *params)
+{
+ return qed_rdma_start(QED_AFFIN_HWFN(cdev), params);
+}
+
+static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
+{
+ struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi);
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ const u8 *new_mac_address)
+{
+ int rc = 0;
+
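+ /* Replace the LLH MAC filter: drop the old address (if any) before
+ * adding the new one
+ */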
+ if (old_mac_address)
+ qed_llh_remove_mac_filter(cdev, 0, old_mac_address);
+ if (new_mac_address)
+ rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address);
+
+ if (rc)
+ DP_ERR(cdev,
+ "qed roce ll2 mac filter set: failed to add MAC filter\n");
+
+ return rc;
+}
+
+static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset)
+{
+ enum qed_eng eng;
+ u8 ppfid = 0;
+ int rc;
+
+ /* Make sure iWARP CMT mode is enabled before setting the affinity */
+ if (!cdev->iwarp_cmt)
+ return -EINVAL;
+
+ if (b_reset)
+ eng = QED_BOTH_ENG;
+ else
+ eng = cdev->l2_affin_hint ? QED_ENG1 : QED_ENG0;
+
+ rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to set the engine affinity of ppfid %d\n",
+ ppfid);
+ return rc;
+ }
+
+ DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP),
+ "LLH: Set the engine affinity of non-RoCE packets as %d\n",
+ eng);
+
+ return 0;
+}
+
+static const struct qed_rdma_ops qed_rdma_ops_pass = {
+ .common = &qed_common_ops_pass,
+ .fill_dev_info = &qed_fill_rdma_dev_info,
+ .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx,
+ .rdma_init = &qed_rdma_init,
+ .rdma_add_user = &qed_rdma_add_user,
+ .rdma_remove_user = &qed_rdma_remove_user,
+ .rdma_stop = &qed_rdma_stop,
+ .rdma_query_port = &qed_rdma_query_port,
+ .rdma_query_device = &qed_rdma_query_device,
+ .rdma_get_start_sb = &qed_rdma_get_sb_start,
+ .rdma_get_rdma_int = &qed_rdma_get_int,
+ .rdma_set_rdma_int = &qed_rdma_set_int,
+ .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix,
+ .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update,
+ .rdma_alloc_pd = &qed_rdma_alloc_pd,
+ .rdma_dealloc_pd = &qed_rdma_free_pd,
+ .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd,
+ .rdma_dealloc_xrcd = &qed_rdma_free_xrcd,
+ .rdma_create_cq = &qed_rdma_create_cq,
+ .rdma_destroy_cq = &qed_rdma_destroy_cq,
+ .rdma_create_qp = &qed_rdma_create_qp,
+ .rdma_modify_qp = &qed_rdma_modify_qp,
+ .rdma_query_qp = &qed_rdma_query_qp,
+ .rdma_destroy_qp = &qed_rdma_destroy_qp,
+ .rdma_alloc_tid = &qed_rdma_alloc_tid,
+ .rdma_free_tid = &qed_rdma_free_tid,
+ .rdma_register_tid = &qed_rdma_register_tid,
+ .rdma_deregister_tid = &qed_rdma_deregister_tid,
+ .rdma_create_srq = &qed_rdma_create_srq,
+ .rdma_modify_srq = &qed_rdma_modify_srq,
+ .rdma_destroy_srq = &qed_rdma_destroy_srq,
+ .ll2_acquire_connection = &qed_ll2_acquire_connection,
+ .ll2_establish_connection = &qed_ll2_establish_connection,
+ .ll2_terminate_connection = &qed_ll2_terminate_connection,
+ .ll2_release_connection = &qed_ll2_release_connection,
+ .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer,
+ .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet,
+ .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet,
+ .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter,
+ .ll2_get_stats = &qed_ll2_get_stats,
+ .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin,
+ .iwarp_connect = &qed_iwarp_connect,
+ .iwarp_create_listen = &qed_iwarp_create_listen,
+ .iwarp_destroy_listen = &qed_iwarp_destroy_listen,
+ .iwarp_accept = &qed_iwarp_accept,
+ .iwarp_reject = &qed_iwarp_reject,
+ .iwarp_send_rtr = &qed_iwarp_send_rtr,
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
+{
+ return &qed_rdma_ops_pass;
+}
+EXPORT_SYMBOL(qed_get_rdma_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
new file mode 100644
index 000000000..275372301
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_RDMA_H
+#define _QED_RDMA_H
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_rdma_if.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_iwarp.h"
+#include "qed_roce.h"
+
+#define QED_RDMA_MAX_P_KEY (1)
+#define QED_RDMA_MAX_WQE (0x7FFF)
+#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF)
+#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000)
+#define QED_RDMA_ACK_DELAY (15)
+#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL)
+#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS)
+#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS)
+/* Add 1 for header element */
+#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1)
+#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE)
+#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16)
+#define QED_RDMA_MAX_SRQS (32 * 1024)
+
+#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1)
+#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1)
+
+/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC
+ * SRQs is much smaller so there's no need to have that many domains.
+ */
+#define QED_RDMA_MAX_XRCDS (roundup_pow_of_two(RDMA_MAX_XRC_SRQS))
+
+enum qed_rdma_toggle_bit {
+ QED_RDMA_TOGGLE_BIT_CLEAR = 0,
+ QED_RDMA_TOGGLE_BIT_SET = 1
+};
+
+#define QED_RDMA_MAX_BMAP_NAME (10)
+struct qed_bmap {
+ unsigned long *bitmap;
+ u32 max_count;
+ char name[QED_RDMA_MAX_BMAP_NAME];
+};
+
+struct qed_rdma_info {
+ /* spin lock to protect bitmaps */
+ spinlock_t lock;
+
+ struct qed_bmap cq_map;
+ struct qed_bmap pd_map;
+ struct qed_bmap xrcd_map;
+ struct qed_bmap tid_map;
+ struct qed_bmap qp_map;
+ struct qed_bmap srq_map;
+ struct qed_bmap xrc_srq_map;
+ struct qed_bmap cid_map;
+ struct qed_bmap tcp_cid_map;
+ struct qed_bmap real_cid_map;
+ struct qed_bmap dpi_map;
+ struct qed_bmap toggle_bits;
+ struct qed_rdma_events events;
+ struct qed_rdma_device *dev;
+ struct qed_rdma_port *port;
+ u32 last_tid;
+ u8 num_cnqs;
+ u32 num_qps;
+ u32 num_mrs;
+ u32 num_srqs;
+ u16 srq_id_offset;
+ u16 queue_zone_base;
+ u16 max_queue_zones;
+ enum protocol_type proto;
+ struct qed_iwarp_info iwarp;
+ u8 active:1;
+};
+
+struct qed_rdma_qp {
+ struct regpair qp_handle;
+ struct regpair qp_handle_async;
+ u32 qpid;
+ u16 icid;
+ enum qed_roce_qp_state cur_state;
+ enum qed_rdma_qp_type qp_type;
+ enum qed_iwarp_qp_state iwarp_state;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+
+ u16 pd;
+ u16 pkey;
+ u32 dest_qp;
+ u16 mtu;
+ u16 srq_id;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u16 dpi;
+ u32 flow_label;
+ bool lb_indication;
+ u16 vlan_id;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ enum roce_mode roce_mode;
+ u16 udp_src_port;
+ u8 stats_queue;
+
+ /* requester */
+ u8 max_rd_atomic_req;
+ u32 sq_psn;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ dma_addr_t sq_pbl_ptr;
+ void *orq;
+ dma_addr_t orq_phys_addr;
+ u8 orq_num_pages;
+ bool req_offloaded;
+ bool has_req;
+
+ /* responder */
+ u8 max_rd_atomic_resp;
+ u32 rq_psn;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ u16 xrcd_id;
+ dma_addr_t rq_pbl_ptr;
+ void *irq;
+ dma_addr_t irq_phys_addr;
+ u8 irq_num_pages;
+ bool resp_offloaded;
+ u32 cq_prod;
+ bool has_resp;
+
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+
+ void *shared_queue;
+ dma_addr_t shared_queue_phys_addr;
+ struct qed_iwarp_ep *ep;
+ u8 edpm_mode;
+};
+
+static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
+{
+ if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT ||
+ qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI)
+ return true;
+
+ return false;
+}
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
+#else
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
+ { return -EINVAL; }
+static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
+#endif
+
+int
+qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 max_count, char *name);
+
+void
+qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check);
+
+int
+qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn,
+ struct qed_bmap *bmap, u32 *id_num);
+
+void
+qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void
+qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+int
+qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num);
+
+void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac);
+
+bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
new file mode 100644
index 000000000..b5e35f433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -0,0 +1,1725 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef REG_ADDR_H
+#define REG_ADDR_H
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \
+ 0
+
+#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \
+ 0xfff << 0)
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \
+ 12
+
+#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \
+ 0xfff << 12)
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \
+ 24
+
+#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
+ 0xff << 24)
+
+#define CDU_REG_SEGMENT0_PARAMS \
+ 0x580904UL
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \
+ 24
+#define CDU_REG_SEGMENT1_PARAMS \
+ 0x580908UL
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \
+ 24
+
+#define XSDM_REG_OPERATION_GEN \
+ 0xf80408UL
+#define NIG_REG_RX_BRB_OUT_EN \
+ 0x500e18UL
+#define NIG_REG_STORM_OUT_EN \
+ 0x500e08UL
+#define PSWRQ2_REG_L2P_VALIDATE_VFID \
+ 0x240c50UL
+#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \
+ 0x2aae04UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
+ 0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+ 0x2a0800UL
+#define BAR0_MAP_REG_MSDM_RAM \
+ 0x1d00000UL
+#define BAR0_MAP_REG_USDM_RAM \
+ 0x1d80000UL
+#define BAR0_MAP_REG_PSDM_RAM \
+ 0x1f00000UL
+#define BAR0_MAP_REG_TSDM_RAM \
+ 0x1c80000UL
+#define BAR0_MAP_REG_XSDM_RAM \
+ 0x1e00000UL
+#define BAR0_MAP_REG_YSDM_RAM \
+ 0x1e80000UL
+#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
+ 0x5011f4UL
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \
+ 0x1f0164UL
+#define PRS_REG_SEARCH_TCP \
+ 0x1f0400UL
+#define PRS_REG_SEARCH_UDP \
+ 0x1f0404UL
+#define PRS_REG_SEARCH_FCOE \
+ 0x1f0408UL
+#define PRS_REG_SEARCH_ROCE \
+ 0x1f040cUL
+#define PRS_REG_SEARCH_OPENFLOW \
+ 0x1f0434UL
+#define PRS_REG_SEARCH_TAG1 \
+ 0x1f0444UL
+#define PRS_REG_SEARCH_TENANT_ID \
+ 0x1f044cUL
+#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
+ 0x1f0a0cUL
+#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
+ 0x1f0410UL
+#define TM_REG_PF_ENABLE_CONN \
+ 0x2c043cUL
+#define TM_REG_PF_ENABLE_TASK \
+ 0x2c0444UL
+#define TM_REG_PF_SCAN_ACTIVE_CONN \
+ 0x2c04fcUL
+#define TM_REG_PF_SCAN_ACTIVE_TASK \
+ 0x2c0500UL
+#define IGU_REG_LEADING_EDGE_LATCH \
+ 0x18082cUL
+#define IGU_REG_TRAILING_EDGE_LATCH \
+ 0x180830UL
+#define QM_REG_USG_CNT_PF_TX \
+ 0x2f2eacUL
+#define QM_REG_USG_CNT_PF_OTHER \
+ 0x2f2eb0UL
+#define DORQ_REG_PF_DB_ENABLE \
+ 0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x1009c4UL
+#define QM_REG_PF_EN \
+ 0x2f2ea4UL
+#define QM_REG_RLGLBLUPPERBOUND \
+ 0x2f3c00UL
+#define TCFC_REG_WEAK_ENABLE_VF \
+ 0x2d0704UL
+#define TCFC_REG_STRONG_ENABLE_PF \
+ 0x2d0708UL
+#define TCFC_REG_STRONG_ENABLE_VF \
+ 0x2d070cUL
+#define CCFC_REG_WEAK_ENABLE_VF \
+ 0x2e0704UL
+#define CCFC_REG_STRONG_ENABLE_PF \
+ 0x2e0708UL
+#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \
+ 0x2aa404UL
+#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \
+ 0x2aa408UL
+#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \
+ 0x2aa40cUL
+#define PGLUE_B_REG_PGL_ADDR_94_F0_BB \
+ 0x2aa410UL
+#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \
+ 0x2aa138UL
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \
+ 0x2aa174UL
+#define MISC_REG_GEN_PURP_CR0 \
+ 0x008c80UL
+#define MCP_REG_SCRATCH \
+ 0xe20000UL
+#define MCP_REG_SCRATCH_SIZE \
+ 57344
+#define CNIG_REG_NW_PORT_MODE_BB \
+ 0x218200UL
+#define MISCS_REG_CHIP_NUM \
+ 0x00976cUL
+#define MISCS_REG_CHIP_REV \
+ 0x009770UL
+#define MISCS_REG_CMT_ENABLED_FOR_PAIR \
+ 0x00971cUL
+#define MISCS_REG_CHIP_TEST_REG \
+ 0x009778UL
+#define MISCS_REG_CHIP_METAL \
+ 0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+ 0x0096f0UL
+#define BRB_REG_HEADER_SIZE \
+ 0x340804UL
+#define BTB_REG_HEADER_SIZE \
+ 0xdb0804UL
+#define CAU_REG_LONG_TIMEOUT_THRESHOLD \
+ 0x1c0708UL
+#define CCFC_REG_ACTIVITY_COUNTER \
+ 0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+ 0x2e070cUL
+#define CDU_REG_CCFC_CTX_VALID0 \
+ 0x580400UL
+#define CDU_REG_CCFC_CTX_VALID1 \
+ 0x580404UL
+#define CDU_REG_TCFC_CTX_VALID0 \
+ 0x580408UL
+#define CDU_REG_CID_ADDR_PARAMS \
+ 0x580900UL
+#define DBG_REG_CLIENT_ENABLE \
+ 0x010004UL
+#define DBG_REG_TIMESTAMP_VALID_EN \
+ 0x010b58UL
+#define DMAE_REG_INIT \
+ 0x00c000UL
+#define DORQ_REG_IFEN \
+ 0x100040UL
+#define DORQ_REG_TAG1_OVRD_MODE \
+ 0x1008b4UL
+#define DORQ_REG_PF_PCP_BB_K2 \
+ 0x1008c4UL
+#define DORQ_REG_PF_EXT_VID_BB_K2 \
+ 0x1008c8UL
+#define DORQ_REG_DB_DROP_REASON \
+ 0x100a2cUL
+#define DORQ_REG_DB_DROP_DETAILS \
+ 0x100a24UL
+#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \
+ 0x100a1cUL
+#define GRC_REG_TIMEOUT_EN \
+ 0x050404UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \
+ 0x050054UL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \
+ 0x05004cUL
+#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \
+ 0x050050UL
+#define IGU_REG_BLOCK_CONFIGURATION \
+ 0x180040UL
+#define MCM_REG_INIT \
+ 0x1200000UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MISC_REG_PORT_MODE \
+ 0x008c00UL
+#define MISCS_REG_CLK_100G_MODE \
+ 0x009070UL
+#define MSDM_REG_ENABLE_IN1 \
+ 0xfc0004UL
+#define MSEM_REG_ENABLE_IN \
+ 0x1800004UL
+#define NIG_REG_CM_HDR \
+ 0x500840UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
+ 0x50196cUL
+#define NIG_REG_LLH_PPFID2PFID_TBL_0 \
+ 0x501970UL
+#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL \
+ 0x50
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
+ 0x501964UL
+#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL
+#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE \
+ 0x501a00UL
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \
+ 32
+#define NIG_REG_LLH_FUNC_FILTER_EN \
+ 0x501a80UL
+#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE \
+ 0x501ac0UL
+#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \
+ 0x501b00UL
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \
+ 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \
+ 0x501b40UL
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \
+ 16
+#define NCSI_REG_CONFIG \
+ 0x040200UL
+#define PBF_REG_INIT \
+ 0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+ 0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+ 0xd806ccUL
+#define PTU_REG_ATC_INIT_ARRAY \
+ 0x560000UL
+#define PCM_REG_INIT \
+ 0x1100000UL
+#define PGLUE_B_REG_ADMIN_PER_PF_REGION \
+ 0x2a9000UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \
+ 0x2aa150UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \
+ 0x2aa144UL
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \
+ 0x2aa148UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS \
+ 0x2aa14cUL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \
+ 0x2aa154UL
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \
+ 0x2aa158UL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS \
+ 0x2aa15cUL
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \
+ 0x2aa160UL
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \
+ 0x2aa164UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \
+ 0x2aa54cUL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \
+ 0x2aa544UL
+#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \
+ 0x2aa548UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \
+ 0x2aae74UL
+#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \
+ 0x2aae78UL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \
+ 0x2aae7cUL
+#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \
+ 0x2aae80UL
+#define PGLUE_B_REG_LATCHED_ERRORS_CLR \
+ 0x2aa3bcUL
+#define PRM_REG_DISABLE_PRM \
+ 0x230000UL
+#define PRS_REG_SOFT_RST \
+ 0x1f0000UL
+#define PRS_REG_MSG_INFO \
+ 0x1f0a1cUL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF \
+ 0x1f0430UL
+#define PRS_REG_USE_LIGHT_L2 \
+ 0x1f096cUL
+#define PSDM_REG_ENABLE_IN1 \
+ 0xfa0004UL
+#define PSEM_REG_ENABLE_IN \
+ 0x1600004UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ2_REG_CDUT_P_SIZE \
+ 0x24000cUL
+#define PSWRQ2_REG_ILT_MEMORY \
+ 0x260000UL
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \
+ 15200
+#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \
+ 22000
+#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
+ 0x2a0040UL
+#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
+ 0x29e050UL
+#define PSWHST_REG_INCORRECT_ACCESS_VALID \
+ 0x2a0070UL
+#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \
+ 0x2a0074UL
+#define PSWHST_REG_INCORRECT_ACCESS_DATA \
+ 0x2a0068UL
+#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \
+ 0x2a006cUL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD2_REG_CONF11 \
+ 0x29d064UL
+#define PSWWR_REG_USDM_FULL_TH \
+ 0x29a040UL
+#define PSWWR2_REG_CDU_FULL_TH2 \
+ 0x29b040UL
+#define QM_REG_MAXPQSIZE_0 \
+ 0x2f0434UL
+#define RSS_REG_RSS_INIT_EN \
+ 0x238804UL
+#define RDIF_REG_STOP_ON_ERROR \
+ 0x300040UL
+#define RDIF_REG_DEBUG_ERROR_INFO \
+ 0x300400UL
+#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
+#define SRC_REG_SOFT_RST \
+ 0x23874cUL
+#define TCFC_REG_ACTIVITY_COUNTER \
+ 0x2d8800UL
+#define TCM_REG_INIT \
+ 0x1180000UL
+#define TM_REG_PXP_READ_DATA_FIFO_INIT \
+ 0x2c0014UL
+#define TSDM_REG_ENABLE_IN1 \
+ 0xfb0004UL
+#define TSEM_REG_ENABLE_IN \
+ 0x1700004UL
+#define TDIF_REG_STOP_ON_ERROR \
+ 0x310040UL
+#define TDIF_REG_DEBUG_ERROR_INFO \
+ 0x310400UL
+#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \
+ 64
+#define UCM_REG_INIT \
+ 0x1280000UL
+#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \
+ 0x051004UL
+#define USDM_REG_ENABLE_IN1 \
+ 0xfd0004UL
+#define USEM_REG_ENABLE_IN \
+ 0x1900004UL
+#define XCM_REG_INIT \
+ 0x1000000UL
+#define XSDM_REG_ENABLE_IN1 \
+ 0xf80004UL
+#define XSEM_REG_ENABLE_IN \
+ 0x1400004UL
+#define YCM_REG_INIT \
+ 0x1080000UL
+#define YSDM_REG_ENABLE_IN1 \
+ 0xf90004UL
+#define YSEM_REG_ENABLE_IN \
+ 0x1500004UL
+#define XYLD_REG_SCBD_STRICT_PRIO \
+ 0x4c0000UL
+#define TMLD_REG_SCBD_STRICT_PRIO \
+ 0x4d0000UL
+#define MULD_REG_SCBD_STRICT_PRIO \
+ 0x4e0000UL
+#define YULD_REG_SCBD_STRICT_PRIO \
+ 0x4c8000UL
+#define MISC_REG_SHARED_MEM_ADDR \
+ 0x008c20UL
+#define DMAE_REG_GO_C0 \
+ 0x00c048UL
+#define DMAE_REG_GO_C1 \
+ 0x00c04cUL
+#define DMAE_REG_GO_C2 \
+ 0x00c050UL
+#define DMAE_REG_GO_C3 \
+ 0x00c054UL
+#define DMAE_REG_GO_C4 \
+ 0x00c058UL
+#define DMAE_REG_GO_C5 \
+ 0x00c05cUL
+#define DMAE_REG_GO_C6 \
+ 0x00c060UL
+#define DMAE_REG_GO_C7 \
+ 0x00c064UL
+#define DMAE_REG_GO_C8 \
+ 0x00c068UL
+#define DMAE_REG_GO_C9 \
+ 0x00c06cUL
+#define DMAE_REG_GO_C10 \
+ 0x00c070UL
+#define DMAE_REG_GO_C11 \
+ 0x00c074UL
+#define DMAE_REG_GO_C12 \
+ 0x00c078UL
+#define DMAE_REG_GO_C13 \
+ 0x00c07cUL
+#define DMAE_REG_GO_C14 \
+ 0x00c080UL
+#define DMAE_REG_GO_C15 \
+ 0x00c084UL
+#define DMAE_REG_GO_C16 \
+ 0x00c088UL
+#define DMAE_REG_GO_C17 \
+ 0x00c08cUL
+#define DMAE_REG_GO_C18 \
+ 0x00c090UL
+#define DMAE_REG_GO_C19 \
+ 0x00c094UL
+#define DMAE_REG_GO_C20 \
+ 0x00c098UL
+#define DMAE_REG_GO_C21 \
+ 0x00c09cUL
+#define DMAE_REG_GO_C22 \
+ 0x00c0a0UL
+#define DMAE_REG_GO_C23 \
+ 0x00c0a4UL
+#define DMAE_REG_GO_C24 \
+ 0x00c0a8UL
+#define DMAE_REG_GO_C25 \
+ 0x00c0acUL
+#define DMAE_REG_GO_C26 \
+ 0x00c0b0UL
+#define DMAE_REG_GO_C27 \
+ 0x00c0b4UL
+#define DMAE_REG_GO_C28 \
+ 0x00c0b8UL
+#define DMAE_REG_GO_C29 \
+ 0x00c0bcUL
+#define DMAE_REG_GO_C30 \
+ 0x00c0c0UL
+#define DMAE_REG_GO_C31 \
+ 0x00c0c4UL
+#define DMAE_REG_CMD_MEM \
+ 0x00c800UL
+#define QM_REG_MAXPQSIZETXSEL_0 \
+ 0x2f0440UL
+#define QM_REG_SDMCMDREADY \
+ 0x2f1e10UL
+#define QM_REG_SDMCMDADDR \
+ 0x2f1e04UL
+#define QM_REG_SDMCMDDATALSB \
+ 0x2f1e08UL
+#define QM_REG_SDMCMDDATAMSB \
+ 0x2f1e0cUL
+#define QM_REG_SDMCMDGO \
+ 0x2f1e14UL
+#define QM_REG_RLPFCRD \
+ 0x2f4d80UL
+#define QM_REG_RLPFINCVAL \
+ 0x2f4c80UL
+#define QM_REG_RLGLBLCRD \
+ 0x2f4400UL
+#define QM_REG_RLGLBLINCVAL \
+ 0x2f3400UL
+#define IGU_REG_ATTENTION_ENABLE \
+ 0x18083cUL
+#define IGU_REG_ATTN_MSG_ADDR_L \
+ 0x180820UL
+#define IGU_REG_ATTN_MSG_ADDR_H \
+ 0x180824UL
+#define MISC_REG_AEU_GENERAL_ATTN_0 \
+ 0x008400UL
+#define MISC_REG_AEU_GENERAL_ATTN_32 \
+ 0x008480UL
+#define MISC_REG_AEU_GENERAL_ATTN_35 \
+ 0x00848cUL
+#define CAU_REG_SB_ADDR_MEMORY \
+ 0x1c8000UL
+#define CAU_REG_SB_VAR_MEMORY \
+ 0x1c6000UL
+#define CAU_REG_PI_MEMORY \
+ 0x1d0000UL
+#define IGU_REG_PF_CONFIGURATION \
+ 0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x180804UL
+#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
+ 0x00849cUL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 \
+ 0x0084a8UL
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32 \
+ (0x1UL << 0)
+#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32_SHIFT \
+ 0
+#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
+ 0x0087b4UL
+#define MISC_REG_AEU_MASK_ATTN_IGU \
+ 0x008494UL
+#define IGU_REG_CLEANUP_STATUS_0 \
+ 0x180980UL
+#define IGU_REG_CLEANUP_STATUS_1 \
+ 0x180a00UL
+#define IGU_REG_CLEANUP_STATUS_2 \
+ 0x180a80UL
+#define IGU_REG_CLEANUP_STATUS_3 \
+ 0x180b00UL
+#define IGU_REG_CLEANUP_STATUS_4 \
+ 0x180b80UL
+#define IGU_REG_COMMAND_REG_32LSB_DATA \
+ 0x180840UL
+#define IGU_REG_COMMAND_REG_CTRL \
+ 0x180848UL
+#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \
+ 0x1 << 1)
+#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \
+ 0x1 << 0)
+#define IGU_REG_PRODUCER_MEMORY 0x182000UL
+#define IGU_REG_CONSUMER_MEM 0x183000UL
+#define IGU_REG_MAPPING_MEMORY \
+ 0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+ 0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+ 0x180900UL
+#define MISCS_REG_GENERIC_POR_0 \
+ 0x0096d4UL
+#define MCP_REG_NVM_CFG4 \
+ 0xe0642cUL
+#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \
+ 0x7 << 0)
+#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
+ 0
+#define MCP_REG_CPU_STATE \
+ 0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
+#define MCP_REG_CPU_EVENT_MASK \
+ 0xe05008UL
+#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL
+#define PGLUE_B_REG_PF_BAR0_SIZE \
+ 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE \
+ 0x2aae64UL
+#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
+#define QM_REG_WFQVPUPPERBOUND \
+ 0x2fb000UL
+#define QM_REG_WFQVPCRD \
+ 0x2fc000UL
+#define PGLCS_REG_DBG_SELECT_K2_E5 \
+ 0x001d14UL
+#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x001d18UL
+#define PGLCS_REG_DBG_SHIFT_K2_E5 \
+ 0x001d1cUL
+#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x001d20UL
+#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x001d24UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_1 \
+ 0x008070UL
+#define MISC_REG_RESET_PL_PDA_VMAIN_2 \
+ 0x008080UL
+#define MISC_REG_RESET_PL_PDA_VAUX \
+ 0x008090UL
+#define MISCS_REG_RESET_PL_UA \
+ 0x009050UL
+#define MISCS_REG_RESET_PL_HV \
+ 0x009060UL
+#define MISCS_REG_RESET_PL_HV_2_K2_E5 \
+ 0x009150UL
+#define DMAE_REG_DBG_SELECT \
+ 0x00c510UL
+#define DMAE_REG_DBG_DWORD_ENABLE \
+ 0x00c514UL
+#define DMAE_REG_DBG_SHIFT \
+ 0x00c518UL
+#define DMAE_REG_DBG_FORCE_VALID \
+ 0x00c51cUL
+#define DMAE_REG_DBG_FORCE_FRAME \
+ 0x00c520UL
+#define NCSI_REG_DBG_SELECT \
+ 0x040474UL
+#define NCSI_REG_DBG_DWORD_ENABLE \
+ 0x040478UL
+#define NCSI_REG_DBG_SHIFT \
+ 0x04047cUL
+#define NCSI_REG_DBG_FORCE_VALID \
+ 0x040480UL
+#define NCSI_REG_DBG_FORCE_FRAME \
+ 0x040484UL
+#define GRC_REG_DBG_SELECT \
+ 0x0500a4UL
+#define GRC_REG_DBG_DWORD_ENABLE \
+ 0x0500a8UL
+#define GRC_REG_DBG_SHIFT \
+ 0x0500acUL
+#define GRC_REG_DBG_FORCE_VALID \
+ 0x0500b0UL
+#define GRC_REG_DBG_FORCE_FRAME \
+ 0x0500b4UL
+#define UMAC_REG_DBG_SELECT_K2_E5 \
+ 0x051094UL
+#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x051098UL
+#define UMAC_REG_DBG_SHIFT_K2_E5 \
+ 0x05109cUL
+#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x0510a0UL
+#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x0510a4UL
+#define MCP2_REG_DBG_SELECT \
+ 0x052400UL
+#define MCP2_REG_DBG_DWORD_ENABLE \
+ 0x052404UL
+#define MCP2_REG_DBG_SHIFT \
+ 0x052408UL
+#define MCP2_REG_DBG_FORCE_VALID \
+ 0x052440UL
+#define MCP2_REG_DBG_FORCE_FRAME \
+ 0x052444UL
+#define PCIE_REG_DBG_SELECT \
+ 0x0547e8UL
+#define PCIE_REG_DBG_DWORD_ENABLE \
+ 0x0547ecUL
+#define PCIE_REG_DBG_SHIFT \
+ 0x0547f0UL
+#define PCIE_REG_DBG_FORCE_VALID \
+ 0x0547f4UL
+#define PCIE_REG_DBG_FORCE_FRAME \
+ 0x0547f8UL
+#define DORQ_REG_DBG_SELECT \
+ 0x100ad0UL
+#define DORQ_REG_DBG_DWORD_ENABLE \
+ 0x100ad4UL
+#define DORQ_REG_DBG_SHIFT \
+ 0x100ad8UL
+#define DORQ_REG_DBG_FORCE_VALID \
+ 0x100adcUL
+#define DORQ_REG_DBG_FORCE_FRAME \
+ 0x100ae0UL
+#define IGU_REG_DBG_SELECT \
+ 0x181578UL
+#define IGU_REG_DBG_DWORD_ENABLE \
+ 0x18157cUL
+#define IGU_REG_DBG_SHIFT \
+ 0x181580UL
+#define IGU_REG_DBG_FORCE_VALID \
+ 0x181584UL
+#define IGU_REG_DBG_FORCE_FRAME \
+ 0x181588UL
+#define CAU_REG_DBG_SELECT \
+ 0x1c0ea8UL
+#define CAU_REG_DBG_DWORD_ENABLE \
+ 0x1c0eacUL
+#define CAU_REG_DBG_SHIFT \
+ 0x1c0eb0UL
+#define CAU_REG_DBG_FORCE_VALID \
+ 0x1c0eb4UL
+#define CAU_REG_DBG_FORCE_FRAME \
+ 0x1c0eb8UL
+#define PRS_REG_DBG_SELECT \
+ 0x1f0b6cUL
+#define PRS_REG_DBG_DWORD_ENABLE \
+ 0x1f0b70UL
+#define PRS_REG_DBG_SHIFT \
+ 0x1f0b74UL
+#define PRS_REG_DBG_FORCE_VALID \
+ 0x1f0ba0UL
+#define PRS_REG_DBG_FORCE_FRAME \
+ 0x1f0ba4UL
+#define CNIG_REG_DBG_SELECT_K2_E5 \
+ 0x218254UL
+#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x218258UL
+#define CNIG_REG_DBG_SHIFT_K2_E5 \
+ 0x21825cUL
+#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x218260UL
+#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x218264UL
+#define PRM_REG_DBG_SELECT \
+ 0x2306a8UL
+#define PRM_REG_DBG_DWORD_ENABLE \
+ 0x2306acUL
+#define PRM_REG_DBG_SHIFT \
+ 0x2306b0UL
+#define PRM_REG_DBG_FORCE_VALID \
+ 0x2306b4UL
+#define PRM_REG_DBG_FORCE_FRAME \
+ 0x2306b8UL
+#define SRC_REG_DBG_SELECT \
+ 0x238700UL
+#define SRC_REG_DBG_DWORD_ENABLE \
+ 0x238704UL
+#define SRC_REG_DBG_SHIFT \
+ 0x238708UL
+#define SRC_REG_DBG_FORCE_VALID \
+ 0x23870cUL
+#define SRC_REG_DBG_FORCE_FRAME \
+ 0x238710UL
+#define RSS_REG_DBG_SELECT \
+ 0x238c4cUL
+#define RSS_REG_DBG_DWORD_ENABLE \
+ 0x238c50UL
+#define RSS_REG_DBG_SHIFT \
+ 0x238c54UL
+#define RSS_REG_DBG_FORCE_VALID \
+ 0x238c58UL
+#define RSS_REG_DBG_FORCE_FRAME \
+ 0x238c5cUL
+#define RPB_REG_DBG_SELECT \
+ 0x23c728UL
+#define RPB_REG_DBG_DWORD_ENABLE \
+ 0x23c72cUL
+#define RPB_REG_DBG_SHIFT \
+ 0x23c730UL
+#define RPB_REG_DBG_FORCE_VALID \
+ 0x23c734UL
+#define RPB_REG_DBG_FORCE_FRAME \
+ 0x23c738UL
+#define PSWRQ2_REG_DBG_SELECT \
+ 0x240100UL
+#define PSWRQ2_REG_DBG_DWORD_ENABLE \
+ 0x240104UL
+#define PSWRQ2_REG_DBG_SHIFT \
+ 0x240108UL
+#define PSWRQ2_REG_DBG_FORCE_VALID \
+ 0x24010cUL
+#define PSWRQ2_REG_DBG_FORCE_FRAME \
+ 0x240110UL
+#define PSWRQ_REG_DBG_SELECT \
+ 0x280020UL
+#define PSWRQ_REG_DBG_DWORD_ENABLE \
+ 0x280024UL
+#define PSWRQ_REG_DBG_SHIFT \
+ 0x280028UL
+#define PSWRQ_REG_DBG_FORCE_VALID \
+ 0x28002cUL
+#define PSWRQ_REG_DBG_FORCE_FRAME \
+ 0x280030UL
+#define PSWWR_REG_DBG_SELECT \
+ 0x29a084UL
+#define PSWWR_REG_DBG_DWORD_ENABLE \
+ 0x29a088UL
+#define PSWWR_REG_DBG_SHIFT \
+ 0x29a08cUL
+#define PSWWR_REG_DBG_FORCE_VALID \
+ 0x29a090UL
+#define PSWWR_REG_DBG_FORCE_FRAME \
+ 0x29a094UL
+#define PSWRD_REG_DBG_SELECT \
+ 0x29c040UL
+#define PSWRD_REG_DBG_DWORD_ENABLE \
+ 0x29c044UL
+#define PSWRD_REG_DBG_SHIFT \
+ 0x29c048UL
+#define PSWRD_REG_DBG_FORCE_VALID \
+ 0x29c04cUL
+#define PSWRD_REG_DBG_FORCE_FRAME \
+ 0x29c050UL
+#define PSWRD2_REG_DBG_SELECT \
+ 0x29d400UL
+#define PSWRD2_REG_DBG_DWORD_ENABLE \
+ 0x29d404UL
+#define PSWRD2_REG_DBG_SHIFT \
+ 0x29d408UL
+#define PSWRD2_REG_DBG_FORCE_VALID \
+ 0x29d40cUL
+#define PSWRD2_REG_DBG_FORCE_FRAME \
+ 0x29d410UL
+#define PSWHST2_REG_DBG_SELECT \
+ 0x29e058UL
+#define PSWHST2_REG_DBG_DWORD_ENABLE \
+ 0x29e05cUL
+#define PSWHST2_REG_DBG_SHIFT \
+ 0x29e060UL
+#define PSWHST2_REG_DBG_FORCE_VALID \
+ 0x29e064UL
+#define PSWHST2_REG_DBG_FORCE_FRAME \
+ 0x29e068UL
+#define PSWHST_REG_DBG_SELECT \
+ 0x2a0100UL
+#define PSWHST_REG_DBG_DWORD_ENABLE \
+ 0x2a0104UL
+#define PSWHST_REG_DBG_SHIFT \
+ 0x2a0108UL
+#define PSWHST_REG_DBG_FORCE_VALID \
+ 0x2a010cUL
+#define PSWHST_REG_DBG_FORCE_FRAME \
+ 0x2a0110UL
+#define PGLUE_B_REG_DBG_SELECT \
+ 0x2a8400UL
+#define PGLUE_B_REG_DBG_DWORD_ENABLE \
+ 0x2a8404UL
+#define PGLUE_B_REG_DBG_SHIFT \
+ 0x2a8408UL
+#define PGLUE_B_REG_DBG_FORCE_VALID \
+ 0x2a840cUL
+#define PGLUE_B_REG_DBG_FORCE_FRAME \
+ 0x2a8410UL
+#define TM_REG_DBG_SELECT \
+ 0x2c07a8UL
+#define TM_REG_DBG_DWORD_ENABLE \
+ 0x2c07acUL
+#define TM_REG_DBG_SHIFT \
+ 0x2c07b0UL
+#define TM_REG_DBG_FORCE_VALID \
+ 0x2c07b4UL
+#define TM_REG_DBG_FORCE_FRAME \
+ 0x2c07b8UL
+#define TCFC_REG_DBG_SELECT \
+ 0x2d0500UL
+#define TCFC_REG_DBG_DWORD_ENABLE \
+ 0x2d0504UL
+#define TCFC_REG_DBG_SHIFT \
+ 0x2d0508UL
+#define TCFC_REG_DBG_FORCE_VALID \
+ 0x2d050cUL
+#define TCFC_REG_DBG_FORCE_FRAME \
+ 0x2d0510UL
+#define CCFC_REG_DBG_SELECT \
+ 0x2e0500UL
+#define CCFC_REG_DBG_DWORD_ENABLE \
+ 0x2e0504UL
+#define CCFC_REG_DBG_SHIFT \
+ 0x2e0508UL
+#define CCFC_REG_DBG_FORCE_VALID \
+ 0x2e050cUL
+#define CCFC_REG_DBG_FORCE_FRAME \
+ 0x2e0510UL
+#define QM_REG_DBG_SELECT \
+ 0x2f2e74UL
+#define QM_REG_DBG_DWORD_ENABLE \
+ 0x2f2e78UL
+#define QM_REG_DBG_SHIFT \
+ 0x2f2e7cUL
+#define QM_REG_DBG_FORCE_VALID \
+ 0x2f2e80UL
+#define QM_REG_DBG_FORCE_FRAME \
+ 0x2f2e84UL
+#define RDIF_REG_DBG_SELECT \
+ 0x300500UL
+#define RDIF_REG_DBG_DWORD_ENABLE \
+ 0x300504UL
+#define RDIF_REG_DBG_SHIFT \
+ 0x300508UL
+#define RDIF_REG_DBG_FORCE_VALID \
+ 0x30050cUL
+#define RDIF_REG_DBG_FORCE_FRAME \
+ 0x300510UL
+#define TDIF_REG_DBG_SELECT \
+ 0x310500UL
+#define TDIF_REG_DBG_DWORD_ENABLE \
+ 0x310504UL
+#define TDIF_REG_DBG_SHIFT \
+ 0x310508UL
+#define TDIF_REG_DBG_FORCE_VALID \
+ 0x31050cUL
+#define TDIF_REG_DBG_FORCE_FRAME \
+ 0x310510UL
+#define BRB_REG_DBG_SELECT \
+ 0x340ed0UL
+#define BRB_REG_DBG_DWORD_ENABLE \
+ 0x340ed4UL
+#define BRB_REG_DBG_SHIFT \
+ 0x340ed8UL
+#define BRB_REG_DBG_FORCE_VALID \
+ 0x340edcUL
+#define BRB_REG_DBG_FORCE_FRAME \
+ 0x340ee0UL
+#define XYLD_REG_DBG_SELECT \
+ 0x4c1600UL
+#define XYLD_REG_DBG_DWORD_ENABLE \
+ 0x4c1604UL
+#define XYLD_REG_DBG_SHIFT \
+ 0x4c1608UL
+#define XYLD_REG_DBG_FORCE_VALID \
+ 0x4c160cUL
+#define XYLD_REG_DBG_FORCE_FRAME \
+ 0x4c1610UL
+#define YULD_REG_DBG_SELECT_BB_K2 \
+ 0x4c9600UL
+#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \
+ 0x4c9604UL
+#define YULD_REG_DBG_SHIFT_BB_K2 \
+ 0x4c9608UL
+#define YULD_REG_DBG_FORCE_VALID_BB_K2 \
+ 0x4c960cUL
+#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \
+ 0x4c9610UL
+#define TMLD_REG_DBG_SELECT \
+ 0x4d1600UL
+#define TMLD_REG_DBG_DWORD_ENABLE \
+ 0x4d1604UL
+#define TMLD_REG_DBG_SHIFT \
+ 0x4d1608UL
+#define TMLD_REG_DBG_FORCE_VALID \
+ 0x4d160cUL
+#define TMLD_REG_DBG_FORCE_FRAME \
+ 0x4d1610UL
+#define MULD_REG_DBG_SELECT \
+ 0x4e1600UL
+#define MULD_REG_DBG_DWORD_ENABLE \
+ 0x4e1604UL
+#define MULD_REG_DBG_SHIFT \
+ 0x4e1608UL
+#define MULD_REG_DBG_FORCE_VALID \
+ 0x4e160cUL
+#define MULD_REG_DBG_FORCE_FRAME \
+ 0x4e1610UL
+#define NIG_REG_DBG_SELECT \
+ 0x502140UL
+#define NIG_REG_DBG_DWORD_ENABLE \
+ 0x502144UL
+#define NIG_REG_DBG_SHIFT \
+ 0x502148UL
+#define NIG_REG_DBG_FORCE_VALID \
+ 0x50214cUL
+#define NIG_REG_DBG_FORCE_FRAME \
+ 0x502150UL
+#define BMB_REG_DBG_SELECT \
+ 0x540a7cUL
+#define BMB_REG_DBG_DWORD_ENABLE \
+ 0x540a80UL
+#define BMB_REG_DBG_SHIFT \
+ 0x540a84UL
+#define BMB_REG_DBG_FORCE_VALID \
+ 0x540a88UL
+#define BMB_REG_DBG_FORCE_FRAME \
+ 0x540a8cUL
+#define PTU_REG_DBG_SELECT \
+ 0x560100UL
+#define PTU_REG_DBG_DWORD_ENABLE \
+ 0x560104UL
+#define PTU_REG_DBG_SHIFT \
+ 0x560108UL
+#define PTU_REG_DBG_FORCE_VALID \
+ 0x56010cUL
+#define PTU_REG_DBG_FORCE_FRAME \
+ 0x560110UL
+#define CDU_REG_DBG_SELECT \
+ 0x580704UL
+#define CDU_REG_DBG_DWORD_ENABLE \
+ 0x580708UL
+#define CDU_REG_DBG_SHIFT \
+ 0x58070cUL
+#define CDU_REG_DBG_FORCE_VALID \
+ 0x580710UL
+#define CDU_REG_DBG_FORCE_FRAME \
+ 0x580714UL
+#define WOL_REG_DBG_SELECT_K2_E5 \
+ 0x600140UL
+#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x600144UL
+#define WOL_REG_DBG_SHIFT_K2_E5 \
+ 0x600148UL
+#define WOL_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x60014cUL
+#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x600150UL
+#define BMBN_REG_DBG_SELECT_K2_E5 \
+ 0x610140UL
+#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x610144UL
+#define BMBN_REG_DBG_SHIFT_K2_E5 \
+ 0x610148UL
+#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x61014cUL
+#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x610150UL
+#define NWM_REG_DBG_SELECT_K2_E5 \
+ 0x8000ecUL
+#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x8000f0UL
+#define NWM_REG_DBG_SHIFT_K2_E5 \
+ 0x8000f4UL
+#define NWM_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x8000f8UL
+#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x8000fcUL
+#define PBF_REG_DBG_SELECT \
+ 0xd80060UL
+#define PBF_REG_DBG_DWORD_ENABLE \
+ 0xd80064UL
+#define PBF_REG_DBG_SHIFT \
+ 0xd80068UL
+#define PBF_REG_DBG_FORCE_VALID \
+ 0xd8006cUL
+#define PBF_REG_DBG_FORCE_FRAME \
+ 0xd80070UL
+#define PBF_PB1_REG_DBG_SELECT \
+ 0xda0728UL
+#define PBF_PB1_REG_DBG_DWORD_ENABLE \
+ 0xda072cUL
+#define PBF_PB1_REG_DBG_SHIFT \
+ 0xda0730UL
+#define PBF_PB1_REG_DBG_FORCE_VALID \
+ 0xda0734UL
+#define PBF_PB1_REG_DBG_FORCE_FRAME \
+ 0xda0738UL
+#define PBF_PB2_REG_DBG_SELECT \
+ 0xda4728UL
+#define PBF_PB2_REG_DBG_DWORD_ENABLE \
+ 0xda472cUL
+#define PBF_PB2_REG_DBG_SHIFT \
+ 0xda4730UL
+#define PBF_PB2_REG_DBG_FORCE_VALID \
+ 0xda4734UL
+#define PBF_PB2_REG_DBG_FORCE_FRAME \
+ 0xda4738UL
+#define BTB_REG_DBG_SELECT \
+ 0xdb08c8UL
+#define BTB_REG_DBG_DWORD_ENABLE \
+ 0xdb08ccUL
+#define BTB_REG_DBG_SHIFT \
+ 0xdb08d0UL
+#define BTB_REG_DBG_FORCE_VALID \
+ 0xdb08d4UL
+#define BTB_REG_DBG_FORCE_FRAME \
+ 0xdb08d8UL
+#define XSDM_REG_DBG_SELECT \
+ 0xf80e28UL
+#define XSDM_REG_DBG_DWORD_ENABLE \
+ 0xf80e2cUL
+#define XSDM_REG_DBG_SHIFT \
+ 0xf80e30UL
+#define XSDM_REG_DBG_FORCE_VALID \
+ 0xf80e34UL
+#define XSDM_REG_DBG_FORCE_FRAME \
+ 0xf80e38UL
+#define YSDM_REG_DBG_SELECT \
+ 0xf90e28UL
+#define YSDM_REG_DBG_DWORD_ENABLE \
+ 0xf90e2cUL
+#define YSDM_REG_DBG_SHIFT \
+ 0xf90e30UL
+#define YSDM_REG_DBG_FORCE_VALID \
+ 0xf90e34UL
+#define YSDM_REG_DBG_FORCE_FRAME \
+ 0xf90e38UL
+#define PSDM_REG_DBG_SELECT \
+ 0xfa0e28UL
+#define PSDM_REG_DBG_DWORD_ENABLE \
+ 0xfa0e2cUL
+#define PSDM_REG_DBG_SHIFT \
+ 0xfa0e30UL
+#define PSDM_REG_DBG_FORCE_VALID \
+ 0xfa0e34UL
+#define PSDM_REG_DBG_FORCE_FRAME \
+ 0xfa0e38UL
+#define TSDM_REG_DBG_SELECT \
+ 0xfb0e28UL
+#define TSDM_REG_DBG_DWORD_ENABLE \
+ 0xfb0e2cUL
+#define TSDM_REG_DBG_SHIFT \
+ 0xfb0e30UL
+#define TSDM_REG_DBG_FORCE_VALID \
+ 0xfb0e34UL
+#define TSDM_REG_DBG_FORCE_FRAME \
+ 0xfb0e38UL
+#define MSDM_REG_DBG_SELECT \
+ 0xfc0e28UL
+#define MSDM_REG_DBG_DWORD_ENABLE \
+ 0xfc0e2cUL
+#define MSDM_REG_DBG_SHIFT \
+ 0xfc0e30UL
+#define MSDM_REG_DBG_FORCE_VALID \
+ 0xfc0e34UL
+#define MSDM_REG_DBG_FORCE_FRAME \
+ 0xfc0e38UL
+#define USDM_REG_DBG_SELECT \
+ 0xfd0e28UL
+#define USDM_REG_DBG_DWORD_ENABLE \
+ 0xfd0e2cUL
+#define USDM_REG_DBG_SHIFT \
+ 0xfd0e30UL
+#define USDM_REG_DBG_FORCE_VALID \
+ 0xfd0e34UL
+#define USDM_REG_DBG_FORCE_FRAME \
+ 0xfd0e38UL
+#define XCM_REG_DBG_SELECT \
+ 0x1000040UL
+#define XCM_REG_DBG_DWORD_ENABLE \
+ 0x1000044UL
+#define XCM_REG_DBG_SHIFT \
+ 0x1000048UL
+#define XCM_REG_DBG_FORCE_VALID \
+ 0x100004cUL
+#define XCM_REG_DBG_FORCE_FRAME \
+ 0x1000050UL
+#define YCM_REG_DBG_SELECT \
+ 0x1080040UL
+#define YCM_REG_DBG_DWORD_ENABLE \
+ 0x1080044UL
+#define YCM_REG_DBG_SHIFT \
+ 0x1080048UL
+#define YCM_REG_DBG_FORCE_VALID \
+ 0x108004cUL
+#define YCM_REG_DBG_FORCE_FRAME \
+ 0x1080050UL
+#define PCM_REG_DBG_SELECT \
+ 0x1100040UL
+#define PCM_REG_DBG_DWORD_ENABLE \
+ 0x1100044UL
+#define PCM_REG_DBG_SHIFT \
+ 0x1100048UL
+#define PCM_REG_DBG_FORCE_VALID \
+ 0x110004cUL
+#define PCM_REG_DBG_FORCE_FRAME \
+ 0x1100050UL
+#define TCM_REG_DBG_SELECT \
+ 0x1180040UL
+#define TCM_REG_DBG_DWORD_ENABLE \
+ 0x1180044UL
+#define TCM_REG_DBG_SHIFT \
+ 0x1180048UL
+#define TCM_REG_DBG_FORCE_VALID \
+ 0x118004cUL
+#define TCM_REG_DBG_FORCE_FRAME \
+ 0x1180050UL
+#define MCM_REG_DBG_SELECT \
+ 0x1200040UL
+#define MCM_REG_DBG_DWORD_ENABLE \
+ 0x1200044UL
+#define MCM_REG_DBG_SHIFT \
+ 0x1200048UL
+#define MCM_REG_DBG_FORCE_VALID \
+ 0x120004cUL
+#define MCM_REG_DBG_FORCE_FRAME \
+ 0x1200050UL
+#define UCM_REG_DBG_SELECT \
+ 0x1280050UL
+#define UCM_REG_DBG_DWORD_ENABLE \
+ 0x1280054UL
+#define UCM_REG_DBG_SHIFT \
+ 0x1280058UL
+#define UCM_REG_DBG_FORCE_VALID \
+ 0x128005cUL
+#define UCM_REG_DBG_FORCE_FRAME \
+ 0x1280060UL
+#define XSEM_REG_DBG_SELECT \
+ 0x1401528UL
+#define XSEM_REG_DBG_DWORD_ENABLE \
+ 0x140152cUL
+#define XSEM_REG_DBG_SHIFT \
+ 0x1401530UL
+#define XSEM_REG_DBG_FORCE_VALID \
+ 0x1401534UL
+#define XSEM_REG_DBG_FORCE_FRAME \
+ 0x1401538UL
+#define YSEM_REG_DBG_SELECT \
+ 0x1501528UL
+#define YSEM_REG_DBG_DWORD_ENABLE \
+ 0x150152cUL
+#define YSEM_REG_DBG_SHIFT \
+ 0x1501530UL
+#define YSEM_REG_DBG_FORCE_VALID \
+ 0x1501534UL
+#define YSEM_REG_DBG_FORCE_FRAME \
+ 0x1501538UL
+#define PSEM_REG_DBG_SELECT \
+ 0x1601528UL
+#define PSEM_REG_DBG_DWORD_ENABLE \
+ 0x160152cUL
+#define PSEM_REG_DBG_SHIFT \
+ 0x1601530UL
+#define PSEM_REG_DBG_FORCE_VALID \
+ 0x1601534UL
+#define PSEM_REG_DBG_FORCE_FRAME \
+ 0x1601538UL
+#define TSEM_REG_DBG_SELECT \
+ 0x1701528UL
+#define TSEM_REG_DBG_DWORD_ENABLE \
+ 0x170152cUL
+#define TSEM_REG_DBG_SHIFT \
+ 0x1701530UL
+#define TSEM_REG_DBG_FORCE_VALID \
+ 0x1701534UL
+#define TSEM_REG_DBG_FORCE_FRAME \
+ 0x1701538UL
+#define DORQ_REG_PF_USAGE_CNT \
+ 0x1009c0UL
+#define DORQ_REG_PF_OVFL_STICKY \
+ 0x1009d0UL
+#define DORQ_REG_DPM_FORCE_ABORT \
+ 0x1009d8UL
+#define DORQ_REG_INT_STS \
+ 0x100180UL
+#define DORQ_REG_INT_STS_ADDRESS_ERROR \
+ (0x1UL << 0)
+#define DORQ_REG_INT_STS_WR \
+ 0x100188UL
+#define DORQ_REG_DB_DROP_DETAILS_REL \
+ 0x100a28UL
+#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \
+ 0
+#define DORQ_REG_INT_STS_DB_DROP \
+ (0x1UL << 1)
+#define DORQ_REG_INT_STS_DB_DROP_SHIFT \
+ 1
+#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \
+ (0x1UL << 2)
+#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \
+ 2
+#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\
+ (0x1UL << 3)
+#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \
+ 3
+#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \
+ (0x1UL << 4)
+#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \
+ 4
+#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \
+ (0x1UL << 5)
+#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \
+ 5
+#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \
+ (0x1UL << 6)
+#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \
+ 6
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \
+ (0x1UL << 7)
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \
+ 7
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \
+ (0x1UL << 8)
+#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \
+ 8
+#define DORQ_REG_DB_DROP_DETAILS_REASON \
+ 0x100a20UL
+#define MSEM_REG_DBG_SELECT \
+ 0x1801528UL
+#define MSEM_REG_DBG_DWORD_ENABLE \
+ 0x180152cUL
+#define MSEM_REG_DBG_SHIFT \
+ 0x1801530UL
+#define MSEM_REG_DBG_FORCE_VALID \
+ 0x1801534UL
+#define MSEM_REG_DBG_FORCE_FRAME \
+ 0x1801538UL
+#define USEM_REG_DBG_SELECT \
+ 0x1901528UL
+#define USEM_REG_DBG_DWORD_ENABLE \
+ 0x190152cUL
+#define USEM_REG_DBG_SHIFT \
+ 0x1901530UL
+#define USEM_REG_DBG_FORCE_VALID \
+ 0x1901534UL
+#define USEM_REG_DBG_FORCE_FRAME \
+ 0x1901538UL
+#define NWS_REG_DBG_SELECT_K2_E5 \
+ 0x700128UL
+#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x70012cUL
+#define NWS_REG_DBG_SHIFT_K2_E5 \
+ 0x700130UL
+#define NWS_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x700134UL
+#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x700138UL
+#define MS_REG_DBG_SELECT_K2_E5 \
+ 0x6a0228UL
+#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \
+ 0x6a022cUL
+#define MS_REG_DBG_SHIFT_K2_E5 \
+ 0x6a0230UL
+#define MS_REG_DBG_FORCE_VALID_K2_E5 \
+ 0x6a0234UL
+#define MS_REG_DBG_FORCE_FRAME_K2_E5 \
+ 0x6a0238UL
+#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \
+ 0x054398UL
+#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \
+ 0x05439cUL
+#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \
+ 0x0543a0UL
+#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \
+ 0x0543a4UL
+#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
+ 0x0543a8UL
+#define PTLD_REG_DBG_SELECT_E5 \
+ 0x5a1600UL
+#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5a1604UL
+#define PTLD_REG_DBG_SHIFT_E5 \
+ 0x5a1608UL
+#define PTLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5a160cUL
+#define PTLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5a1610UL
+#define YPLD_REG_DBG_SELECT_E5 \
+ 0x5c1600UL
+#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
+ 0x5c1604UL
+#define YPLD_REG_DBG_SHIFT_E5 \
+ 0x5c1608UL
+#define YPLD_REG_DBG_FORCE_VALID_E5 \
+ 0x5c160cUL
+#define YPLD_REG_DBG_FORCE_FRAME_E5 \
+ 0x5c1610UL
+#define RGSRC_REG_DBG_SELECT_E5 \
+ 0x320040UL
+#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x320044UL
+#define RGSRC_REG_DBG_SHIFT_E5 \
+ 0x320048UL
+#define RGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32004cUL
+#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x320050UL
+#define TGSRC_REG_DBG_SELECT_E5 \
+ 0x322040UL
+#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
+ 0x322044UL
+#define TGSRC_REG_DBG_SHIFT_E5 \
+ 0x322048UL
+#define TGSRC_REG_DBG_FORCE_VALID_E5 \
+ 0x32204cUL
+#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
+ 0x322050UL
+#define MISC_REG_RESET_PL_UA \
+ 0x008050UL
+#define MISC_REG_RESET_PL_HV \
+ 0x008060UL
+#define XCM_REG_CTX_RBC_ACCS \
+ 0x1001800UL
+#define XCM_REG_AGG_CON_CTX \
+ 0x1001804UL
+#define XCM_REG_SM_CON_CTX \
+ 0x1001808UL
+#define YCM_REG_CTX_RBC_ACCS \
+ 0x1081800UL
+#define YCM_REG_AGG_CON_CTX \
+ 0x1081804UL
+#define YCM_REG_AGG_TASK_CTX \
+ 0x1081808UL
+#define YCM_REG_SM_CON_CTX \
+ 0x108180cUL
+#define YCM_REG_SM_TASK_CTX \
+ 0x1081810UL
+#define PCM_REG_CTX_RBC_ACCS \
+ 0x1101440UL
+#define PCM_REG_SM_CON_CTX \
+ 0x1101444UL
+#define TCM_REG_CTX_RBC_ACCS \
+ 0x11814c0UL
+#define TCM_REG_AGG_CON_CTX \
+ 0x11814c4UL
+#define TCM_REG_AGG_TASK_CTX \
+ 0x11814c8UL
+#define TCM_REG_SM_CON_CTX \
+ 0x11814ccUL
+#define TCM_REG_SM_TASK_CTX \
+ 0x11814d0UL
+#define MCM_REG_CTX_RBC_ACCS \
+ 0x1201800UL
+#define MCM_REG_AGG_CON_CTX \
+ 0x1201804UL
+#define MCM_REG_AGG_TASK_CTX \
+ 0x1201808UL
+#define MCM_REG_SM_CON_CTX \
+ 0x120180cUL
+#define MCM_REG_SM_TASK_CTX \
+ 0x1201810UL
+#define UCM_REG_CTX_RBC_ACCS \
+ 0x1281700UL
+#define UCM_REG_AGG_CON_CTX \
+ 0x1281704UL
+#define UCM_REG_AGG_TASK_CTX \
+ 0x1281708UL
+#define UCM_REG_SM_CON_CTX \
+ 0x128170cUL
+#define UCM_REG_SM_TASK_CTX \
+ 0x1281710UL
+#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
+ 0x1401140UL
+#define XSEM_REG_SYNC_DBG_EMPTY \
+ 0x1401160UL
+#define XSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1401400UL
+#define XSEM_REG_SLOW_DBG_MODE \
+ 0x1401404UL
+#define XSEM_REG_DBG_FRAME_MODE \
+ 0x1401408UL
+#define XSEM_REG_DBG_GPRE_VECT \
+ 0x1401410UL
+#define XSEM_REG_DBG_MODE1_CFG \
+ 0x1401420UL
+#define XSEM_REG_FAST_MEMORY \
+ 0x1440000UL
+#define YSEM_REG_SYNC_DBG_EMPTY \
+ 0x1501160UL
+#define YSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1501400UL
+#define YSEM_REG_SLOW_DBG_MODE \
+ 0x1501404UL
+#define YSEM_REG_DBG_FRAME_MODE \
+ 0x1501408UL
+#define YSEM_REG_DBG_GPRE_VECT \
+ 0x1501410UL
+#define YSEM_REG_DBG_MODE1_CFG \
+ 0x1501420UL
+#define YSEM_REG_FAST_MEMORY \
+ 0x1540000UL
+#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
+ 0x1601140UL
+#define PSEM_REG_SYNC_DBG_EMPTY \
+ 0x1601160UL
+#define PSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1601400UL
+#define PSEM_REG_SLOW_DBG_MODE \
+ 0x1601404UL
+#define PSEM_REG_DBG_FRAME_MODE \
+ 0x1601408UL
+#define PSEM_REG_DBG_GPRE_VECT \
+ 0x1601410UL
+#define PSEM_REG_DBG_MODE1_CFG \
+ 0x1601420UL
+#define PSEM_REG_FAST_MEMORY \
+ 0x1640000UL
+#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
+ 0x1701140UL
+#define TSEM_REG_SYNC_DBG_EMPTY \
+ 0x1701160UL
+#define TSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1701400UL
+#define TSEM_REG_SLOW_DBG_MODE \
+ 0x1701404UL
+#define TSEM_REG_DBG_FRAME_MODE \
+ 0x1701408UL
+#define TSEM_REG_DBG_GPRE_VECT \
+ 0x1701410UL
+#define TSEM_REG_DBG_MODE1_CFG \
+ 0x1701420UL
+#define TSEM_REG_FAST_MEMORY \
+ 0x1740000UL
+#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \
+ 0x1801140UL
+#define MSEM_REG_SYNC_DBG_EMPTY \
+ 0x1801160UL
+#define MSEM_REG_SLOW_DBG_ACTIVE \
+ 0x1801400UL
+#define MSEM_REG_SLOW_DBG_MODE \
+ 0x1801404UL
+#define MSEM_REG_DBG_FRAME_MODE \
+ 0x1801408UL
+#define MSEM_REG_DBG_GPRE_VECT \
+ 0x1801410UL
+#define MSEM_REG_DBG_MODE1_CFG \
+ 0x1801420UL
+#define MSEM_REG_FAST_MEMORY \
+ 0x1840000UL
+#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
+ 0x1901140UL
+#define SEM_FAST_REG_INT_RAM_SIZE \
+ 20480
+#define USEM_REG_SYNC_DBG_EMPTY \
+ 0x1901160UL
+#define USEM_REG_SLOW_DBG_ACTIVE \
+ 0x1901400UL
+#define USEM_REG_SLOW_DBG_MODE \
+ 0x1901404UL
+#define USEM_REG_DBG_FRAME_MODE \
+ 0x1901408UL
+#define USEM_REG_DBG_GPRE_VECT \
+ 0x1901410UL
+#define USEM_REG_DBG_MODE1_CFG \
+ 0x1901420UL
+#define USEM_REG_FAST_MEMORY \
+ 0x1940000UL
+#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
+ 0x000748UL
+#define SEM_FAST_REG_DBG_MODSRC_DISABLE \
+ 0x00074cUL
+#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
+ 0x000750UL
+#define SEM_FAST_REG_DEBUG_ACTIVE \
+ 0x000740UL
+#define SEM_FAST_REG_INT_RAM \
+ 0x020000UL
+#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
+ 20480
+#define SEM_FAST_REG_RECORD_FILTER_ENABLE \
+ 0x000768UL
+#define GRC_REG_TRACE_FIFO_VALID_DATA \
+ 0x050064UL
+#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
+ 0x05040cUL
+#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \
+ 0x050500UL
+#define IGU_REG_ERROR_HANDLING_MEMORY \
+ 0x181520UL
+#define MCP_REG_CPU_MODE \
+ 0xe05000UL
+#define MCP_REG_CPU_MODE_SOFT_HALT \
+ (0x1 << 10)
+#define BRB_REG_BIG_RAM_ADDRESS \
+ 0x340800UL
+#define BRB_REG_BIG_RAM_DATA \
+ 0x341500UL
+#define BRB_REG_BIG_RAM_DATA_SIZE \
+ 64
+#define SEM_FAST_REG_STALL_0 \
+ 0x000488UL
+#define SEM_FAST_REG_STALLED \
+ 0x000494UL
+#define BTB_REG_BIG_RAM_ADDRESS \
+ 0xdb0800UL
+#define BTB_REG_BIG_RAM_DATA \
+ 0xdb0c00UL
+#define BMB_REG_BIG_RAM_ADDRESS \
+ 0x540800UL
+#define BMB_REG_BIG_RAM_DATA \
+ 0x540f00UL
+#define SEM_FAST_REG_STORM_REG_FILE \
+ 0x008000UL
+#define RSS_REG_RSS_RAM_ADDR \
+ 0x238c30UL
+#define MISCS_REG_BLOCK_256B_EN \
+ 0x009074UL
+#define MCP_REG_SCRATCH_SIZE_BB_K2 \
+ 57344
+#define MCP_REG_CPU_REG_FILE \
+ 0xe05200UL
+#define MCP_REG_CPU_REG_FILE_SIZE \
+ 32
+#define DBG_REG_DEBUG_TARGET \
+ 0x01005cUL
+#define DBG_REG_FULL_MODE \
+ 0x010060UL
+#define DBG_REG_CALENDAR_OUT_DATA \
+ 0x010480UL
+#define GRC_REG_TRACE_FIFO \
+ 0x050068UL
+#define IGU_REG_ERROR_HANDLING_DATA_VALID \
+ 0x181530UL
+#define DBG_REG_DBG_BLOCK_ON \
+ 0x010454UL
+#define DBG_REG_FILTER_ENABLE \
+ 0x0109d0UL
+#define DBG_REG_FRAMING_MODE \
+ 0x010058UL
+#define DBG_REG_TRIGGER_ENABLE \
+ 0x01054cUL
+#define SEM_FAST_REG_VFC_DATA_WR \
+ 0x000b40UL
+#define SEM_FAST_REG_VFC_ADDR \
+ 0x000b44UL
+#define SEM_FAST_REG_VFC_DATA_RD \
+ 0x000b48UL
+#define SEM_FAST_REG_VFC_STATUS \
+ 0x000b4cUL
+#define RSS_REG_RSS_RAM_DATA \
+ 0x238c20UL
+#define RSS_REG_RSS_RAM_DATA_SIZE \
+ 4
+#define MISC_REG_BLOCK_256B_EN \
+ 0x008c14UL
+#define NWS_REG_NWS_CMU_K2 \
+ 0x720000UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
+ 0x000680UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
+ 0x000684UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
+ 0x0006c0UL
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
+ 0x0006c4UL
+#define MS_REG_MS_CMU_K2 \
+ 0x6a4000UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+ 0x000208UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+ 0x00020cUL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+ 0x000210UL
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+ 0x000214UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
+ 0x000208UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
+ 0x00020cUL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
+ 0x000210UL
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
+ 0x000214UL
+#define PHY_PCIE_REG_PHY0_K2 \
+ 0x620000UL
+#define PHY_PCIE_REG_PHY1_K2 \
+ 0x624000UL
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
+#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
+#define NIG_REG_PPF_TO_ENGINE_SEL_SIZE 8
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL
+#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL
+#define DORQ_REG_PF_DPM_ENABLE 0x100510UL
+#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL
+#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL
+#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL
+#define NIG_REG_RX_PTP_EN 0x501900UL
+#define NIG_REG_TX_PTP_EN 0x501904UL
+#define NIG_REG_LLH_PTP_TO_HOST 0x501908UL
+#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL
+#define NIG_REG_PTP_SW_TXTSEN 0x501910UL
+#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL
+#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL
+#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL
+#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL
+#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL
+#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL
+#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL
+#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB 0x501938UL
+#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL
+#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL
+#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL
+#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL
+#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL
+#define NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL
+#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL
+#define NIG_REG_TSGEN_SYNC_TIME_MSB 0x5088c4UL
+#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL
+#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL
+#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL
+#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL
+#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL
+#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL
+#define PSWRQ2_REG_WR_MBS0 0x240400UL
+
+#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL
+#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL
+#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL
+#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL
+#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL
+#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
+#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+
+#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
+
+#define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
+#define PRS_REG_CM_HDR_GFT 0x1f11c8UL
+#define PRS_REG_GFT_CAM 0x1f1100UL
+#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
+#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0
+#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8
+#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
new file mode 100644
index 000000000..134ecfca9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/if_vlan.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_ll2.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include <linux/qed/qed_rdma_if.h>
+#include "qed_rdma.h"
+#include "qed_roce.h"
+#include "qed_sp.h"
+
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);
+
+static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ struct qed_rdma_events events = p_hwfn->p_rdma_info->events;
+ union rdma_eqe_data *rdata = &data->rdma_data;
+
+ if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
+ u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid);
+
+ /* icid release in this async event can occur only if the icid
+ * was offloaded to the FW. In case it wasn't offloaded this is
+ * handled in qed_roce_sp_destroy_qp.
+ */
+ qed_roce_free_real_icid(p_hwfn, icid);
+ } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY ||
+ fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) {
+ u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo);
+
+ events.affiliated_event(events.context, fw_event_code,
+ &srq_id);
+ } else {
+ events.affiliated_event(events.context, fw_event_code,
+ (void *)&rdata->async_handle);
+ }
+
+ return 0;
+}
+
+void qed_roce_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
+ int wait_count = 0;
+
+ /* when destroying a RoCE QP the control is returned to the user after
+ * the synchronous part. The asynchronous part may take a little longer.
+ * We delay for a short while if an async destroy QP is still expected.
+ * Beyond the added delay we clear the bitmap anyway.
+ */
+ while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) {
+ /* If the HW device is in recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid bitmap to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return;
+
+ msleep(100);
+ if (wait_count++ > 20) {
+ DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+ break;
+ }
+ }
+}
+
+static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid,
+ __le32 *dst_gid)
+{
+ u32 i;
+
+ if (qp->roce_mode == ROCE_V2_IPV4) {
+ /* The IPv4 addresses shall be aligned to the highest word.
+ * The lower words must be zero.
+ */
+ memset(src_gid, 0, sizeof(union qed_gid));
+ memset(dst_gid, 0, sizeof(union qed_gid));
+ src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr);
+ dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr);
+ } else {
+ /* GIDs and IPv6 addresses coincide in location and size */
+ for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) {
+ src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]);
+ dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]);
+ }
+ }
+}
+
+static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode)
+{
+ switch (roce_mode) {
+ case ROCE_V1:
+ return PLAIN_ROCE;
+ case ROCE_V2_IPV4:
+ return RROCE_IPV4;
+ case ROCE_V2_IPV6:
+ return RROCE_IPV6;
+ default:
+ return MAX_ROCE_FLAVOR;
+ }
+}
+
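+/* Release both bitmap entries of a QP's CID pair: the responder CID and the
+ * adjacent requester CID (cid + 1) are freed together under the rdma_info
+ * lock.
+ */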
+static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 responder_icid;
+ u32 requester_icid;
+ int rc;
+
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &responder_icid);
+ if (rc) {
+ spin_unlock_bh(&p_rdma_info->lock);
+ return rc;
+ }
+
+ rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map,
+ &requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ if (rc)
+ goto err;
+
+ /* the two icids should be adjacent */
+ if ((requester_icid - responder_icid) != 1) {
+ DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ responder_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+ requester_icid += qed_cxt_get_proto_cid_start(p_hwfn,
+ p_rdma_info->proto);
+
+ /* If these icids require a new ILT line allocate DMA-able context for
+ * an ILT page
+ */
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid);
+ if (rc)
+ goto err;
+
+ rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid);
+ if (rc)
+ goto err;
+
+ *cid = (u16)responder_icid;
+ return rc;
+
+err:
+ spin_lock_bh(&p_rdma_info->lock);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid);
+
+ spin_unlock_bh(&p_rdma_info->lock);
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA,
+ "Allocate CID - failed, rc = %d\n", rc);
+ return rc;
+}
+
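+/* Mark a CID as offloaded to the FW in the real_cid_map; the bit is cleared
+ * again by qed_roce_free_real_icid() once the matching destroy completes.
+ */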
+static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+ spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+ qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u8 pri, tc = 0;
+
+ if (qp->vlan_id) {
+ pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ tc = qed_dcbx_get_priority_tc(p_hwfn, pri);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u tc: %u (vlan priority %s)\n",
+ qp->icid, tc, qp->vlan_id ? "enabled" : "disabled");
+
+ return tc;
+}
+
+static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_resp_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum protocol_type proto;
+ u32 flags = 0;
+ int rc;
+ u8 tc;
+
+ if (!qp->has_resp)
+ return 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for IRQ */
+ qp->irq_num_pages = 1;
+ qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->irq_phys_addr, GFP_KERNEL);
+ if (!qp->irq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create responder failed: cannot allocate memory (irq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR,
+ qed_roce_mode_to_flavor(qp->roce_mode));
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG,
+ qed_rdma_is_xrc_qp(qp));
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_resp;
+ p_ramrod->flags = cpu_to_le32(flags);
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->irq_num_pages = qp->irq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
+ p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
+ p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+ p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
+ p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) |
+ qp->rq_cq_id);
+ p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id);
+
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(low_latency_queue);
+
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id);
+ p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid);
+
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ qp->resp_offloaded = true;
+ qp->cq_prod = 0;
+
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn, qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ return rc;
+}
+
+static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_create_qp_req_ramrod_data *p_ramrod;
+ u16 regular_latency_queue, low_latency_queue;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ enum protocol_type proto;
+ u16 flags = 0;
+ int rc;
+ u8 tc;
+
+ if (!qp->has_req)
+ return 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ /* Allocate DMA-able memory for ORQ */
+ qp->orq_num_pages = 1;
+ qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ RDMA_RING_PAGE_SIZE,
+ &qp->orq_phys_addr, GFP_KERNEL);
+ if (!qp->orq) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed create requester failed: cannot allocate memory (orq). rc = %d\n",
+ rc);
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_CREATE_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR,
+ qed_roce_mode_to_flavor(qp->roce_mode));
+
+ SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN,
+ qp->fmr_and_reserved_lkey);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP,
+ qp->signal_all);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT,
+ qp->retry_cnt);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG,
+ qed_rdma_is_xrc_qp(qp));
+
+ p_ramrod = &p_ent->ramrod.roce_create_qp_req;
+ p_ramrod->flags = cpu_to_le16(flags);
+
+ SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE,
+ qp->edpm_mode);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->orq_num_pages = qp->orq_num_pages;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ p_ramrod->pd = cpu_to_le16(qp->pd);
+ p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr);
+ DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi;
+ p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo;
+ p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi;
+ p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo;
+ p_ramrod->cq_cid =
+ cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id);
+
+ tc = qed_roce_get_qp_tc(p_hwfn, qp);
+ regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc);
+ low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "qp icid %u pqs: regular_latency %u low_latency %u\n",
+ qp->icid, regular_latency_queue - CM_TX_PQ_BASE,
+ low_latency_queue - CM_TX_PQ_BASE);
+ p_ramrod->regular_latency_phy_queue =
+ cpu_to_le16(regular_latency_queue);
+ p_ramrod->low_latency_phy_queue =
+ cpu_to_le16(low_latency_queue);
+
+ p_ramrod->dpi = cpu_to_le16(qp->dpi);
+
+ qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr);
+ qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr);
+
+ p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port);
+ p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id);
+ p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) +
+ qp->stats_queue;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ qp->req_offloaded = true;
+ proto = p_hwfn->p_rdma_info->proto;
+ qed_roce_set_real_cid(p_hwfn,
+ qp->icid + 1 -
+ qed_cxt_get_proto_cid_start(p_hwfn, proto));
+
+ return rc;
+
+err:
+ DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+ return rc;
+}
+
+static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u16 flags = 0;
+ int rc;
+
+ if (!qp->has_resp)
+ return 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !qp->resp_offloaded)
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG,
+ !!move_to_err);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN,
+ qp->incoming_rdma_read_en);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN,
+ qp->incoming_rdma_write_en);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN,
+ qp->incoming_atomic_en);
+
+ SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN,
+ qp->e2e_flow_control_en);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER));
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_resp;
+ p_ramrod->flags = cpu_to_le16(flags);
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER,
+ qp->min_rnr_nak_timer);
+
+ p_ramrod->max_ird = qp->max_rd_atomic_resp;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ bool move_to_sqd,
+ bool move_to_err, u32 modify_flags)
+{
+ struct roce_modify_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ u16 flags = 0;
+ int rc;
+
+ if (!qp->has_req)
+ return 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (move_to_err && !(qp->req_offloaded))
+ return 0;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_EVENT_MODIFY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "rc = %d\n", rc);
+ return rc;
+ }
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG,
+ !!move_to_err);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG,
+ !!move_to_sqd);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY,
+ qp->sqd_async);
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG,
+ GET_FIELD(modify_flags,
+ QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG,
+ GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT));
+
+ SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG,
+ GET_FIELD(modify_flags,
+ QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT));
+
+ p_ramrod = &p_ent->ramrod.roce_modify_qp_req;
+ p_ramrod->flags = cpu_to_le16(flags);
+
+ p_ramrod->fields = 0;
+ SET_FIELD(p_ramrod->fields,
+ ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt);
+ SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT,
+ qp->rnr_retry_cnt);
+
+ p_ramrod->max_ord = qp->max_rd_atomic_req;
+ p_ramrod->traffic_class = qp->traffic_class_tos;
+ p_ramrod->hop_limit = qp->hop_limit_ttl;
+ p_ramrod->p_key = cpu_to_le16(qp->pkey);
+ p_ramrod->flow_label = cpu_to_le32(qp->flow_label);
+ p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout);
+ p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc);
+ return rc;
+}
+
+static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ u32 *cq_prod)
+{
+ struct roce_destroy_qp_resp_output_params *p_ramrod_res;
+ struct roce_destroy_qp_resp_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc;
+
+ if (!qp->has_resp) {
+ *cq_prod = 0;
+ return 0;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+ *cq_prod = qp->cq_prod;
+
+ if (!qp->resp_offloaded) {
+ /* If a responder was never offloaded, we need to free the cids
+ * allocated in create_qp as a FW async event will never arrive
+ */
+ u32 cid;
+
+ cid = qp->icid -
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ p_hwfn->p_rdma_info->proto);
+ qed_roce_free_cid_pair(p_hwfn, (u16)cid);
+
+ return 0;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp;
+
+ p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+
+ if (!p_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n",
+ rc);
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return rc;
+ }
+
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
+ qp->cq_prod = *cq_prod;
+
+ /* Free IRQ - only if ramrod succeeded, in case FW is still using it */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->irq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->irq, qp->irq_phys_addr);
+
+ qp->resp_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct roce_destroy_qp_resp_output_params),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp)
+{
+ struct roce_destroy_qp_req_output_params *p_ramrod_res;
+ struct roce_destroy_qp_req_ramrod_data *p_ramrod;
+ struct qed_sp_init_data init_data;
+ struct qed_spq_entry *p_ent;
+ dma_addr_t ramrod_res_phys;
+ int rc = -ENOMEM;
+
+ if (!qp->has_req)
+ return 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
+
+ if (!qp->req_offloaded)
+ return 0;
+
+ p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_ramrod_res),
+ &ramrod_res_phys, GFP_KERNEL);
+ if (!p_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed destroy requester failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid + 1;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err;
+
+ p_ramrod = &p_ent->ramrod.roce_destroy_qp_req;
+ DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err;
+
+ /* Free ORQ - only if ramrod succeeded, in case FW is still using it */
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
+ qp->orq, qp->orq_phys_addr);
+
+ qp->req_offloaded = false;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc);
+
+err:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res),
+ p_ramrod_res, ramrod_res_phys);
+
+ return rc;
+}
+
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params)
+{
+ struct roce_query_qp_resp_output_params *p_resp_ramrod_res;
+ struct roce_query_qp_req_output_params *p_req_ramrod_res;
+ struct roce_query_qp_resp_ramrod_data *p_resp_ramrod;
+ struct roce_query_qp_req_ramrod_data *p_req_ramrod;
+ struct qed_sp_init_data init_data;
+ dma_addr_t resp_ramrod_res_phys;
+ dma_addr_t req_ramrod_res_phys;
+ struct qed_spq_entry *p_ent;
+ bool rq_err_state;
+ bool sq_err_state;
+ bool sq_draining;
+ int rc = -ENOMEM;
+
+ if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) {
+ /* We can't send a ramrod to the fw since this qp wasn't offloaded
+ * to the fw yet
+ */
+ out_params->draining = false;
+ out_params->rq_psn = qp->rq_psn;
+ out_params->sq_psn = qp->sq_psn;
+ out_params->state = qp->cur_state;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n");
+ return 0;
+ }
+
+ if (!(qp->resp_offloaded)) {
+ DP_NOTICE(p_hwfn,
+ "The responder's qp should be offloaded before requester's\n");
+ return -EINVAL;
+ }
+
+ /* Send a query responder ramrod to FW to get RQ-PSN and state */
+ p_resp_ramrod_res =
+ dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_resp_ramrod_res),
+ &resp_ramrod_res_phys, GFP_KERNEL);
+ if (!p_resp_ramrod_res) {
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qp->icid;
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_resp;
+
+ p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp;
+ DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_resp;
+
+ out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
+ rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
+ ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+
+ if (!(qp->req_offloaded)) {
+ /* Don't send query qp for the requester */
+ out_params->sq_psn = qp->sq_psn;
+ out_params->draining = false;
+
+ if (rq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+
+ out_params->state = qp->cur_state;
+
+ return 0;
+ }
+
+ /* Send a query requester ramrod to FW to get SQ-PSN and state */
+ p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(*p_req_ramrod_res),
+ &req_ramrod_res_phys,
+ GFP_KERNEL);
+ if (!p_req_ramrod_res) {
+ rc = -ENOMEM;
+ DP_NOTICE(p_hwfn,
+ "qed query qp failed: cannot allocate memory (ramrod)\n");
+ return rc;
+ }
+
+ /* Get SPQ entry */
+ init_data.cid = qp->icid + 1;
+ rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP,
+ PROTOCOLID_ROCE, &init_data);
+ if (rc)
+ goto err_req;
+
+ p_req_ramrod = &p_ent->ramrod.roce_query_qp_req;
+ DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ goto err_req;
+
+ out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn);
+ sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG);
+ sq_draining =
+ GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags),
+ ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG);
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+
+ out_params->draining = false;
+
+ if (rq_err_state || sq_err_state)
+ qp->cur_state = QED_ROCE_QP_STATE_ERR;
+ else if (sq_draining)
+ out_params->draining = true;
+ out_params->state = qp->cur_state;
+
+ return 0;
+
+err_req:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res),
+ p_req_ramrod_res, req_ramrod_res_phys);
+ return rc;
+err_resp:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
+ p_resp_ramrod_res, resp_ramrod_res_phys);
+ return rc;
+}
+
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+{
+ u32 cq_prod;
+ int rc;
+
+ /* Destroys the specified QP */
+ if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_ERR) &&
+ (qp->cur_state != QED_ROCE_QP_STATE_INIT)) {
+ DP_NOTICE(p_hwfn,
+ "QP must be in error, reset or init state before destroying it\n");
+ return -EINVAL;
+ }
+
+ if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
+ &cq_prod);
+ if (rc)
+ return rc;
+
+ /* Send destroy requester ramrod */
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params)
+{
+ int rc = 0;
+
+ /* Perform additional operations according to the current state and the
+ * next state
+ */
+ if (((prev_state == QED_ROCE_QP_STATE_INIT) ||
+ (prev_state == QED_ROCE_QP_STATE_RESET)) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTR)) {
+ /* Init->RTR or Reset->RTR */
+ rc = qed_roce_sp_create_responder(p_hwfn, qp);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTR) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTR-> RTS */
+ rc = qed_roce_sp_create_requester(p_hwfn, qp);
+ if (rc)
+ return rc;
+
+ /* Send modify responder ramrod */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* RTS->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_RTS) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* RTS->SQD */
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_SQD)) {
+ /* SQD->SQD */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+ return rc;
+ } else if ((prev_state == QED_ROCE_QP_STATE_SQD) &&
+ (qp->cur_state == QED_ROCE_QP_STATE_RTS)) {
+ /* SQD->RTS */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, false,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false,
+ params->modify_flags);
+
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) {
+ /* ->ERR */
+ rc = qed_roce_sp_modify_responder(p_hwfn, qp, true,
+ params->modify_flags);
+ if (rc)
+ return rc;
+
+ rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true,
+ params->modify_flags);
+ return rc;
+ } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) {
+ /* Any state -> RESET */
+ u32 cq_prod;
+
+ /* Send destroy responder ramrod */
+ rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
+ qp,
+ &cq_prod);
+
+ if (rc)
+ return rc;
+
+ qp->cq_prod = cq_prod;
+
+ rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
+ }
+
+ return rc;
+}
+
+static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid)
+{
+ struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
+ u32 start_cid, cid, xcid;
+
+ /* an even icid belongs to a responder while an odd icid belongs to a
+ * requester. The 'cid' received as an input can be either. We calculate
+ * the "partner" icid and call it xcid. Only if both are free then the
+ * "cid" map can be cleared.
+ */
+ start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto);
+ cid = icid - start_cid;
+ xcid = cid ^ 1;
+
+ spin_lock_bh(&p_rdma_info->lock);
+
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid);
+ if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) {
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid);
+ qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid);
+ }
+
+ spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+}
+
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u8 val;
+
+ /* if any QPs are already active, we want to disable DPM, since their
+ * context information contains information from before the latest DCBx
+ * update. Otherwise enable it.
+ */
+ val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
+ p_hwfn->dcbx_no_edpm = (u8)val;
+
+ qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
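+/* Register the RoCE async event handler with the slowpath queue so that FW
+ * events (e.g. destroy-QP-done, SRQ empty/limit) reach qed_roce_async_event().
+ */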
+int qed_roce_setup(struct qed_hwfn *p_hwfn)
+{
+ return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
+ qed_roce_async_event);
+}
+
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ll2_ethertype_en;
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0);
+
+ p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE;
+
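+ /* Turn on bit 0 of the light-L2 ethertype enable register as part of
+ * RoCE HW init, preserving any bits that are already set.
+ */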
+ ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
+ (ll2_ethertype_en | 0x01));
+
+ if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) {
+ DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n");
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n");
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h
new file mode 100644
index 000000000..3a4a2d72a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_ROCE_H
+#define _QED_ROCE_H
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#if IS_ENABLED(CONFIG_QED_RDMA)
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+#else
+static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
+#endif
+
+int qed_roce_setup(struct qed_hwfn *p_hwfn);
+void qed_roce_stop(struct qed_hwfn *p_hwfn);
+int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid);
+int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp);
+
+int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *out_params);
+
+int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_qp *qp,
+ enum qed_roce_qp_state prev_state,
+ struct qed_rdma_modify_qp_in_params *params);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644
index 000000000..6e70781ab
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/crc32.h>
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+#include "qed_selftest.h"
+
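+/* Both the memory and interrupt self-tests post an empty (heartbeat) ramrod
+ * on every hwfn and report the first failure, if any.
+ */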
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int qed_selftest_nvram(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ u32 num_images, i, j, nvm_crc, calc_crc;
+ struct bist_nvm_image_att image_att;
+ u8 *buf = NULL;
+ __be32 val;
+ int rc;
+
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Acquire from MFW the number of available images */
+ rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt, &num_images);
+ if (rc || !num_images) {
+ DP_ERR(p_hwfn, "Failed getting number of images\n");
+ rc = -EINVAL;
+ goto err0;
+ }
+
+ /* Iterate over images and validate CRC */
+ for (i = 0; i < num_images; i++) {
+ /* This mailbox returns information about the image required for
+ * reading it.
+ */
+ rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
+ &image_att, i);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed getting image index %d attributes\n",
+ i);
+ goto err0;
+ }
+
+ /* After an MFW crash dump is collected, the image's CRC stops
+ * being valid.
+ */
+ if (image_att.image_type == NVM_TYPE_MDUMP)
+ continue;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n",
+ i, image_att.len);
+
+ /* Allocate a buffer for holding the nvram image */
+ buf = kzalloc(image_att.len, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
+ goto err0;
+ }
+
+ /* Read image into buffer */
+ rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr,
+ buf, image_att.len);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "Failed reading image index %d from nvm.\n", i);
+ goto err1;
+ }
+
+ /* Convert the buffer into big-endian format (excluding the
+ * closing 4 bytes of CRC).
+ */
+ for (j = 0; j < image_att.len - 4; j += 4) {
+ val = cpu_to_be32(*(u32 *)&buf[j]);
+ *(u32 *)&buf[j] = (__force u32)val;
+ }
+
+ /* Calc CRC for the "actual" image buffer, i.e. not including
+ * the last 4 CRC bytes.
+ */
+ nvm_crc = *(u32 *)(buf + image_att.len - 4);
+ calc_crc = crc32(0xffffffff, buf, image_att.len - 4);
+ calc_crc = (__force u32)~cpu_to_be32(calc_crc);
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc);
+
+ if (calc_crc != nvm_crc) {
+ rc = -EINVAL;
+ goto err1;
+ }
+
+ /* Done with this image; free the buffer to prevent a double release
+ * on subsequent failure.
+ */
+ kfree(buf);
+ buf = NULL;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ return 0;
+
+err1:
+ kfree(buf);
+err0:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644
index 000000000..7a3bd749e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* Copyright (c) 2019-2020 Marvell International Ltd. */
+
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * qed_selftest_memory(): Perform memory test.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * qed_selftest_interrupt(): Perform interrupt test.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * qed_selftest_register(): Perform register test.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * qed_selftest_clock(): Perform clock test.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+
+/**
+ * qed_selftest_nvram(): Perform nvram test.
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Int.
+ */
+int qed_selftest_nvram(struct qed_dev *cdev);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
new file mode 100644
index 000000000..4fb02a557
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_SP_H
+#define _QED_SP_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/qed/qed_chain.h>
+#include "qed.h"
+#include "qed_hsi.h"
+
+enum spq_mode {
+ QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */
+ QED_SPQ_MODE_CB, /* Client supplies a callback */
+ QED_SPQ_MODE_EBLOCK, /* QED should block until completion */
+};
+
+struct qed_spq_comp_cb {
+ void (*function)(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
+ u8 fw_return_code);
+ void *cookie;
+};
+
+/**
+ * qed_eth_cqe_completion(): handles the completion of a
+ * ramrod on the cqe ring.
+ *
+ * @p_hwfn: HW device data.
+ * @cqe: CQE.
+ *
+ * Return: Int.
+ */
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe);
+
+ /* QED Slow-hwfn queue interface */
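+/* Union of every ramrod payload that a single SPQ entry can carry. */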
+union ramrod_data {
+ struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
+ struct rx_queue_start_ramrod_data rx_queue_start;
+ struct rx_queue_update_ramrod_data rx_queue_update;
+ struct rx_queue_stop_ramrod_data rx_queue_stop;
+ struct tx_queue_start_ramrod_data tx_queue_start;
+ struct tx_queue_stop_ramrod_data tx_queue_stop;
+ struct vport_start_ramrod_data vport_start;
+ struct vport_stop_ramrod_data vport_stop;
+ struct rx_update_gft_filter_ramrod_data rx_update_gft;
+ struct vport_update_ramrod_data vport_update;
+ struct core_rx_start_ramrod_data core_rx_queue_start;
+ struct core_rx_stop_ramrod_data core_rx_queue_stop;
+ struct core_tx_start_ramrod_data core_tx_queue_start;
+ struct core_tx_stop_ramrod_data core_tx_queue_stop;
+ struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct rdma_init_func_ramrod_data rdma_init_func;
+ struct rdma_close_func_ramrod_data rdma_close_func;
+ struct rdma_register_tid_ramrod_data rdma_register_tid;
+ struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
+ struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
+ struct roce_create_qp_req_ramrod_data roce_create_qp_req;
+ struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
+ struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
+ struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
+ struct roce_query_qp_req_ramrod_data roce_query_qp_req;
+ struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
+ struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+ struct roce_init_func_ramrod_data roce_init_func;
+ struct rdma_create_cq_ramrod_data rdma_create_cq;
+ struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
+ struct rdma_srq_create_ramrod_data rdma_create_srq;
+ struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
+ struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+ struct iwarp_create_qp_ramrod_data iwarp_create_qp;
+ struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload;
+ struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload;
+ struct iwarp_modify_qp_ramrod_data iwarp_modify_qp;
+ struct iwarp_init_func_ramrod_data iwarp_init_func;
+ struct fcoe_init_ramrod_params fcoe_init;
+ struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld;
+ struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
+ struct fcoe_stat_ramrod_params fcoe_stat;
+
+ struct iscsi_init_ramrod_params iscsi_init;
+ struct iscsi_spe_conn_offload iscsi_conn_offload;
+ struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
+ struct iscsi_spe_conn_termination iscsi_conn_terminate;
+
+ struct nvmetcp_init_ramrod_params nvmetcp_init;
+ struct nvmetcp_spe_conn_offload nvmetcp_conn_offload;
+ struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update;
+ struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
+};
+
+#define EQ_MAX_CREDIT 0xffffffff
+
+enum spq_priority {
+ QED_SPQ_PRIORITY_NORMAL,
+ QED_SPQ_PRIORITY_HIGH,
+};
+
+union qed_spq_req_comp {
+ struct qed_spq_comp_cb cb;
+ u64 *done_addr;
+};
+
+struct qed_spq_comp_done {
+ unsigned int done;
+ u8 fw_return_code;
+};
+
+struct qed_spq_entry {
+ struct list_head list;
+
+ u8 flags;
+
+ /* HSI slow path element */
+ struct slow_path_element elem;
+
+ union ramrod_data ramrod;
+
+ enum spq_priority priority;
+
+ /* pending queue for this entry */
+ struct list_head *queue;
+
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb comp_cb;
+ struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */
+
+ /* Posted entry for unlimited list entry in EBLOCK mode */
+ struct qed_spq_entry *post_ent;
+};
+
+struct qed_eq {
+ struct qed_chain chain;
+ u8 eq_sb_index; /* index within the SB */
+ __le16 *p_fw_cons; /* ptr to index value */
+};
+
+struct qed_consq {
+ struct qed_chain chain;
+};
+
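+/* Per-protocol callback invoked for asynchronous (unsolicited) event-ring
+ * completions; registered via qed_spq_register_async_cb().
+ */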
+typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code);
+
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb);
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id);
+
+struct qed_spq {
+ spinlock_t lock; /* SPQ lock */
+
+ struct list_head unlimited_pending;
+ struct list_head pending;
+ struct list_head completion_pending;
+ struct list_head free_pool;
+
+ struct qed_chain chain;
+
+ /* allocated dma-able memory for spq entries (+ramrod data) */
+ dma_addr_t p_phys;
+ struct qed_spq_entry *p_virt;
+
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+ /* Bitmap for handling out-of-order completions */
+ DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+ u8 comp_bitmap_idx;
+
+ /* Statistics */
+ u32 unlimited_pending_count;
+ u32 normal_count;
+ u32 high_count;
+ u32 comp_sent_count;
+ u32 comp_count;
+
+ u32 cid;
+ u32 db_addr_offset;
+ struct core_db_data db_data;
+ qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
+};
+
+/**
+ * qed_spq_post(): Posts a Slow hwfn request to FW or, lacking that,
+ * pends it to the future list.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: Ent.
+ * @fw_return_code: Return code from firmware.
+ *
+ * Return: Int.
+ */
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *fw_return_code);
+
+/**
+ * qed_spq_alloc(): Allocates & initializes the SPQ and EQ.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_spq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_spq_setup(): Reset the SPQ to its start state.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_spq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_spq_free(): Deallocates the given SPQ struct.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_spq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_spq_get_entry(): Obtain an entry from the spq
+ * free pool list.
+ *
+ * @p_hwfn: HW device data.
+ * @pp_ent: PP ENT.
+ *
+ * Return: Int.
+ */
+int
+qed_spq_get_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent);
+
+/**
+ * qed_spq_return_entry(): Return an entry to spq free pool list.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: P ENT.
+ *
+ * Return: Void.
+ */
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+/**
+ * qed_eq_alloc(): Allocates & initializes an EQ struct.
+ *
+ * @p_hwfn: HW device data.
+ * @num_elem: number of elements in the eq.
+ *
+ * Return: Int.
+ */
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
+
+/**
+ * qed_eq_setup(): Reset the EQ to its start state.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_eq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_eq_free(): deallocates the given EQ struct.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_eq_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_eq_prod_update(): update the FW with default EQ producer.
+ *
+ * @p_hwfn: HW device data.
+ * @prod: Prod.
+ *
+ * Return: Void.
+ */
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
+ u16 prod);
+
+/**
+ * qed_eq_completion(): Completes currently pending EQ elements.
+ *
+ * @p_hwfn: HW device data.
+ * @cookie: Cookie.
+ *
+ * Return: Int.
+ */
+int qed_eq_completion(struct qed_hwfn *p_hwfn,
+ void *cookie);
+
+/**
+ * qed_spq_completion(): Completes a single event.
+ *
+ * @p_hwfn: HW device data.
+ * @echo: echo value from cookie (used for determining completion).
+ * @fw_return_code: FW return code.
+ * @p_data: data from cookie (used in callback function if applicable).
+ *
+ * Return: Int.
+ */
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data);
+
+/**
+ * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: u32 - SPQ CID.
+ */
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_consq_alloc(): Allocates & initializes a ConsQ struct.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_consq_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_consq_setup(): Reset the ConsQ to its start state.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_consq_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_consq_free(): deallocates the given ConsQ struct.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
+
+/* Slow-hwfn low-level commands (Ramrods) function definitions. */
+
+#define QED_SP_EQ_COMPLETION 0x01
+#define QED_SP_CQE_COMPLETION 0x02
+
+struct qed_sp_init_data {
+ u32 cid;
+ u16 opaque_fid;
+
+ /* Information regarding operation upon sending & completion */
+ enum spq_mode comp_mode;
+ struct qed_spq_comp_cb *p_comp_data;
+};
+
+/**
+ * qed_sp_destroy_request(): Returns an SPQ entry to the pool / frees the
+ * entry if allocated. Should be called in error
+ * flows after initializing the SPQ entry
+ * and before posting it.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: Ent.
+ *
+ * Return: Void.
+ */
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent);
+
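+/**
+ * qed_sp_init_request(): Acquire an SPQ entry and initialize its ramrod
+ * header and completion mode.
+ *
+ * @p_hwfn: HW device data.
+ * @pp_ent: Pointer to the acquired SPQ entry.
+ * @cmd: Ramrod command ID.
+ * @protocol: Protocol ID the ramrod belongs to.
+ * @p_data: Initialization data (CID, opaque FID, completion mode/callback).
+ *
+ * Return: Int.
+ */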
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u8 cmd,
+ u8 protocol,
+ struct qed_sp_init_data *p_data);
+
+/**
+ * qed_sp_pf_start(): PF Function Start Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the tunnel HW configuration.
+ * @p_tunn: Tunnel configuration to apply at PF start (may be NULL).
+ * @allow_npar_tx_switch: Allow NPAR TX switching.
+ *
+ * Return: Int.
+ *
+ * This ramrod is sent to initialize a physical function (PF). It will
+ * configure the function-related parameters and write its completion to the
+ * event ring specified in the parameters.
+ *
+ * Ramrods complete on the common event ring for the PF. This ring is
+ * allocated by the driver on host memory and its parameters are written
+ * to the internal RAM of the UStorm by the Function Start Ramrod.
+ *
+ */
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tunn,
+ bool allow_npar_tx_switch);
+
+/**
+ * qed_sp_pf_update(): PF Function Update Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ */
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_sp_pf_update_stag(): Update the firmware with the new outer tag.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_sp_pf_update_ufp(): PF UFP update Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
+
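+/**
+ * qed_sp_pf_stop(): PF Function Stop Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */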
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+
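+/**
+ * qed_sp_pf_update_tunn_cfg(): PF tunnel configuration update Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for writing the tunnel HW configuration.
+ * @p_tunn: Updated tunnel configuration.
+ * @comp_mode: Completion mode.
+ * @p_comp_data: Completion data.
+ *
+ * Return: Int.
+ */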
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+/**
+ * qed_sp_heartbeat_ramrod(): Send empty Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
new file mode 100644
index 000000000..3b54da963
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "qed.h"
+#include <linux/qed/qed_chain.h>
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+
+void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ /* qed_spq_get_entry() can either get an entry from the free_pool,
+ * or, if no entries are left, allocate a new entry and add it to
+ * the unlimited_pending list.
+ */
+ if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
+ kfree(p_ent);
+ else
+ qed_spq_return_entry(p_hwfn, p_ent);
+}
+
+int qed_sp_init_request(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry **pp_ent,
+ u8 cmd, u8 protocol, struct qed_sp_init_data *p_data)
+{
+ u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc;
+
+ if (!pp_ent)
+ return -ENOMEM;
+
+ rc = qed_spq_get_entry(p_hwfn, pp_ent);
+
+ if (rc)
+ return rc;
+
+ p_ent = *pp_ent;
+
+ p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid);
+ p_ent->elem.hdr.cmd_id = cmd;
+ p_ent->elem.hdr.protocol_id = protocol;
+
+ p_ent->priority = QED_SPQ_PRIORITY_NORMAL;
+ p_ent->comp_mode = p_data->comp_mode;
+ p_ent->comp_done.done = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ p_ent->comp_cb.cookie = &p_ent->comp_done;
+ break;
+
+ case QED_SPQ_MODE_BLOCK:
+ if (!p_data->p_comp_data)
+ goto err;
+
+ p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
+ break;
+
+ case QED_SPQ_MODE_CB:
+ if (!p_data->p_comp_data)
+ p_ent->comp_cb.function = NULL;
+ else
+ p_ent->comp_cb = *p_data->p_comp_data;
+ break;
+
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ goto err;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n",
+ opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd),
+ cmd, qed_get_protocol_type_str(protocol), protocol,
+ (unsigned long long)(uintptr_t)&p_ent->ramrod,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));
+
+ return 0;
+
+err:
+ qed_sp_destroy_request(p_hwfn, p_ent);
+
+ return -EINVAL;
+}
+
+static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type)
+{
+ switch (type) {
+ case QED_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case QED_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case QED_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case QED_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
+ return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src, bool b_pf_start)
+{
+ if (p_src->vxlan.b_update_mode || b_pf_start)
+ p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
+
+ if (p_src->l2_gre.b_update_mode || b_pf_start)
+ p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
+
+ if (p_src->ip_gre.b_update_mode || b_pf_start)
+ p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
+
+ if (p_src->l2_geneve.b_update_mode || b_pf_start)
+ p_tun->l2_geneve.b_mode_enabled =
+ p_src->l2_geneve.b_mode_enabled;
+
+ if (p_src->ip_geneve.b_update_mode || b_pf_start)
+ p_tun->ip_geneve.b_mode_enabled =
+ p_src->ip_geneve.b_mode_enabled;
+}
+
+static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
+{
+ int type;
+
+ p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+ p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+ type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+ p_tun->vxlan.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+ p_tun->l2_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+ p_tun->ip_gre.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+ p_tun->l2_geneve.tun_cls = type;
+ type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+ p_tun->ip_geneve.tun_cls = type;
+}
+
+static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun,
+ struct qed_tunnel_info *p_src)
+{
+ p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+ p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
+
+ if (p_src->geneve_port.b_update_port)
+ p_tun->geneve_port.port = p_src->geneve_port.port;
+
+ if (p_src->vxlan_port.b_update_port)
+ p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
+
+static void
+__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+ struct qed_tunn_update_type *tun_type)
+{
+ *p_tunn_cls = tun_type->tun_cls;
+}
+
+static void
+qed_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+ struct qed_tunn_update_type *tun_type,
+ u8 *p_update_port,
+ __le16 *p_port,
+ struct qed_tunn_update_udp_port *p_udp_port)
+{
+ __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
+ if (p_udp_port->b_update_port) {
+ *p_update_port = 1;
+ *p_port = cpu_to_le16(p_udp_port->port);
+ }
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+
+ qed_set_pf_update_tunn_mode(p_tun, p_src, false);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tun->ip_gre);
+
+ p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tun)
+{
+ qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+ p_tun->ip_gre.b_mode_enabled);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
+
+ qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled);
+}
+
+static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tunn)
+{
+ if (p_tunn->vxlan_port.b_update_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_ptt,
+ p_tunn->vxlan_port.port);
+
+ if (p_tunn->geneve_port.b_update_port)
+ qed_set_geneve_dest_port(p_hwfn, p_ptt,
+ p_tunn->geneve_port.port);
+
+ qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+
+ if (!p_src)
+ return;
+
+ qed_set_pf_update_tunn_mode(p_tun, p_src, true);
+ qed_set_tunn_cls_info(p_tun, p_src);
+ qed_set_tunn_ports(p_tun, p_src);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+ &p_tun->vxlan,
+ &p_tunn_cfg->set_vxlan_udp_port_flg,
+ &p_tunn_cfg->vxlan_udp_port,
+ &p_tun->vxlan_port);
+
+ qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+ &p_tun->l2_geneve,
+ &p_tunn_cfg->set_geneve_udp_port_flg,
+ &p_tunn_cfg->geneve_udp_port,
+ &p_tun->geneve_port);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+ &p_tun->ip_geneve);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+ &p_tun->l2_gre);
+
+ __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+ &p_tun->ip_gre);
+}
+
+int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tunn,
+ bool allow_npar_tx_switch)
+{
+ struct outer_tag_config_struct *outer_tag_config;
+ struct pf_start_ramrod_data *p_ramrod = NULL;
+ u16 sb = qed_int_get_sp_sb_id(p_hwfn);
+ u8 sb_index = p_hwfn->p_eq->eq_sb_index;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ u8 page_cnt, i;
+ int rc;
+
+ /* update initial eq producer */
+ qed_eq_prod_update(p_hwfn,
+ qed_chain_get_prod_idx(&p_hwfn->p_eq->chain));
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.pf_start;
+
+ p_ramrod->event_ring_sb_id = cpu_to_le16(sb);
+ p_ramrod->event_ring_sb_index = sb_index;
+ p_ramrod->path_id = QED_PATH_ID(p_hwfn);
+ p_ramrod->dont_log_ramrods = 0;
+ p_ramrod->log_type_mask = cpu_to_le16(0xf);
+
+ if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits))
+ p_ramrod->mf_mode = MF_OVLAN;
+ else
+ p_ramrod->mf_mode = MF_NPAR;
+
+ outer_tag_config = &p_ramrod->outer_tag_config;
+ outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan);
+
+ if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) {
+ outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q);
+ } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) {
+ outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD);
+ outer_tag_config->enable_stag_pri_change = 1;
+ }
+
+ outer_tag_config->pri_map_valid = 1;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ outer_tag_config->inner_to_outer_pri_map[i] = i;
+
+ /* enable_stag_pri_change should be set if the port is in BD mode
+ * or in UFP with Host Control mode.
+ */
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) {
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
+ outer_tag_config->enable_stag_pri_change = 1;
+ else
+ outer_tag_config->enable_stag_pri_change = 0;
+
+ outer_tag_config->outer_tag.tci |=
+ cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
+ }
+
+ /* Place EQ address in RAMROD */
+ DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
+ qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
+
+ /* Place consolidation queue address in ramrod */
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
+ qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
+ p_ramrod->consolid_q_num_pages = page_cnt;
+
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
+
+ if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_FCOE:
+ p_ramrod->personality = PERSONALITY_FCOE;
+ break;
+ case QED_PCI_ISCSI:
+ case QED_PCI_NVMETCP:
+ p_ramrod->personality = PERSONALITY_TCP_ULP;
+ break;
+ case QED_PCI_ETH_ROCE:
+ case QED_PCI_ETH_IWARP:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
+ }
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+ sb, sb_index, outer_tag_config->outer_tag.tci);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ if (p_tunn)
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+ &p_hwfn->cdev->tunnel);
+
+ return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) {
+ DP_INFO(p_hwfn, "Invalid priority type %d\n",
+ p_hwfn->ufp_info.pri_type);
+ return -EINVAL;
+ }
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+ if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS)
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+ else
+ p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_tunnel_info *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+ if (!p_tunn)
+ return -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);
+
+ return rc;
+}
+
+int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+ p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
+ if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
+ p_ent->ramrod.pf_update.mf_vlan |=
+ cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
new file mode 100644
index 000000000..d01b9245f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -0,0 +1,1051 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dev_api.h"
+#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
+#include "qed_hw.h"
+#include "qed_int.h"
+#include "qed_iscsi.h"
+#include "qed_mcp.h"
+#include "qed_ooo.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_rdma.h"
+
+/***************************************************************************
+ * Structures & Definitions
+ ***************************************************************************/
+
+#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER (10)
+#define SPQ_BLOCK_DELAY_US (10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER (1000)
+#define SPQ_BLOCK_SLEEP_MS (5)
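+
+/* Rough worst-case blocking budget implied by the values above: a quick
+ * poll of up to 10 x 10us without sleeping, then up to 1000 x 5ms sleeping
+ * iterations, i.e. about five seconds before an MCP drain is attempted.
+ */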
+
+/***************************************************************************
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
+static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ struct qed_spq_comp_done *comp_done;
+
+ comp_done = (struct qed_spq_comp_done *)cookie;
+
+ comp_done->fw_return_code = fw_return_code;
+
+ /* Make sure completion done is visible on waiting thread */
+ smp_store_release(&comp_done->done, 0x1);
+}
+
+static int __qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret, bool sleep_between_iter)
+{
+ struct qed_spq_comp_done *comp_done;
+ u32 iter_cnt;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+ iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
+ : SPQ_BLOCK_DELAY_MAX_ITER;
+
+ while (iter_cnt--) {
+ /* Validate we receive completion update */
+ if (smp_load_acquire(&comp_done->done) == 1) { /* pairs with the release in qed_spq_blocking_cb() */
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+
+ if (sleep_between_iter)
+ msleep(SPQ_BLOCK_SLEEP_MS);
+ else
+ udelay(SPQ_BLOCK_DELAY_US);
+ }
+
+ return -EBUSY;
+}
+
+static int qed_spq_block(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ u8 *p_fw_ret, bool skip_quick_poll)
+{
+ struct qed_spq_comp_done *comp_done;
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ /* A relatively short polling period w/o sleeping, to allow the FW to
+ * complete the ramrod and thus possibly to avoid the following sleeps.
+ */
+ if (!skip_quick_poll) {
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+ if (!rc)
+ return 0;
+ }
+
+ /* Move to polling with a sleeping period between iterations */
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+ return 0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn, "ptt, failed to acquire\n");
+ return -EAGAIN;
+ }
+
+ DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
+ rc = qed_mcp_drain(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "MCP drain failed\n");
+ goto err;
+ }
+
+ /* Retry after drain */
+ rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+ if (!rc)
+ return 0;
+
+ comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
+ if (comp_done->done == 1) {
+ if (p_fw_ret)
+ *p_fw_ret = comp_done->fw_return_code;
+ return 0;
+ }
+err:
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+ qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL,
+ "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n",
+ le32_to_cpu(p_ent->elem.hdr.cid),
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
+ p_ent->elem.hdr.cmd_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return -EBUSY;
+}
+
+/***************************************************************************
+ * SPQ entries inner API
+ ***************************************************************************/
+static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ p_ent->flags = 0;
+
+ switch (p_ent->comp_mode) {
+ case QED_SPQ_MODE_EBLOCK:
+ case QED_SPQ_MODE_BLOCK:
+ p_ent->comp_cb.function = qed_spq_blocking_cb;
+ break;
+ case QED_SPQ_MODE_CB:
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
+ p_ent->comp_mode);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n",
+ p_ent->elem.hdr.cid,
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
+ p_ent->elem.hdr.cmd_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id,
+ p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
+ D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
+ QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
+ "MODE_CB"));
+
+ return 0;
+}
+
+/***************************************************************************
+ * HSI access
+ ***************************************************************************/
+static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq)
+{
+ struct core_conn_context *p_cxt;
+ struct qed_cxt_info cxt_info;
+ u16 physical_q;
+ int rc;
+
+ cxt_info.iid = p_spq->cid;
+
+ rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
+
+ if (rc < 0) {
+ DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
+ p_spq->cid);
+ return;
+ }
+
+ p_cxt = cxt_info.p_cxt;
+
+ SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+
+ /* QM physical queue */
+ physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+ p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
+
+ p_cxt->xstorm_st_context.spq_base_addr.lo =
+ DMA_LO_LE(p_spq->chain.p_phys_addr);
+ p_cxt->xstorm_st_context.spq_base_addr.hi =
+ DMA_HI_LE(p_spq->chain.p_phys_addr);
+}
+
+static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
+{
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct core_db_data *p_db_data = &p_spq->db_data;
+ u16 echo = qed_chain_get_prod_idx(p_chain);
+ struct slow_path_element *elem;
+
+ p_ent->elem.hdr.echo = cpu_to_le16(echo);
+ elem = qed_chain_produce(p_chain);
+ if (!elem) {
+ DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
+ return -EINVAL;
+ }
+
+ *elem = p_ent->elem; /* struct assignment */
+
+ /* send a doorbell on the slow hwfn session */
+ p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
+
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
+
+ DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
+
+ /* make sure the doorbell has been rung */
+ wmb();
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
+ p_spq->db_addr_offset,
+ p_spq->cid,
+ p_db_data->params,
+ p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain));
+
+ return 0;
+}
+
+/***************************************************************************
+ * Asynchronous events
+ ***************************************************************************/
+static int
+qed_async_event_completion(struct qed_hwfn *p_hwfn,
+ struct event_ring_entry *p_eqe)
+{
+ qed_spq_async_comp_cb cb;
+
+ if (!p_hwfn->p_spq)
+ return -EINVAL;
+
+ if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+ DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
+ p_eqe->protocol_id);
+
+ return -EINVAL;
+ }
+
+ cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+ if (cb) {
+ return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+ &p_eqe->data, p_eqe->fw_return_code);
+ } else {
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for %s:%d\n",
+ qed_get_protocol_type_str(p_eqe->protocol_id),
+ p_eqe->protocol_id);
+
+ return -EINVAL;
+ }
+}
+
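+/* Register/unregister a per-protocol handler for asynchronous EQ events;
+ * dispatched from qed_async_event_completion().
+ */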
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id,
+ qed_spq_async_comp_cb cb)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return -EINVAL;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+ return 0;
+}
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+ enum protocol_type protocol_id)
+{
+ if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+ return;
+
+ p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
+}
+
+/***************************************************************************
+ * EQ API
+ ***************************************************************************/
+void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
+{
+ u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_EQE_CONS, p_hwfn->rel_pf_id);
+
+ REG_WR16(p_hwfn, addr, prod);
+}
+
+int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
+{
+ struct qed_eq *p_eq = cookie;
+ struct qed_chain *p_chain = &p_eq->chain;
+ int rc = 0;
+
+ /* take a snapshot of the FW consumer */
+ u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
+
+ /* Need to guarantee the fw_cons index we use points to a usable
+ * element (to comply with our chain), so our macros would comply
+ */
+ if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
+ qed_chain_get_usable_per_page(p_chain))
+ fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);
+
+ /* Complete current segment of eq entries */
+ while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
+ struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);
+
+ if (!p_eqe) {
+ rc = -EINVAL;
+ break;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+ p_eqe->opcode,
+ p_eqe->protocol_id,
+ p_eqe->reserved0,
+ le16_to_cpu(p_eqe->echo),
+ p_eqe->fw_return_code,
+ p_eqe->flags);
+
+ if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
+ if (qed_async_event_completion(p_hwfn, p_eqe))
+ rc = -EINVAL;
+ } else if (qed_spq_completion(p_hwfn,
+ p_eqe->echo,
+ p_eqe->fw_return_code,
+ &p_eqe->data)) {
+ rc = -EINVAL;
+ }
+
+ qed_chain_recycle_consumed(p_chain);
+ }
+
+ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+
+ return rc;
+}
+
+int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = num_elem,
+ .elem_size = sizeof(union event_ring_element),
+ };
+ struct qed_eq *p_eq;
+ int ret;
+
+ /* Allocate EQ struct */
+ p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
+ if (!p_eq)
+ return -ENOMEM;
+
+ ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, &params);
+ if (ret) {
+ DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n");
+ goto eq_allocate_fail;
+ }
+
+ /* register EQ completion on the SP SB */
+ qed_int_register_cb(p_hwfn, qed_eq_completion,
+ p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
+
+ p_hwfn->p_eq = p_eq;
+ return 0;
+
+eq_allocate_fail:
+ kfree(p_eq);
+
+ return ret;
+}
+
+void qed_eq_setup(struct qed_hwfn *p_hwfn)
+{
+ qed_chain_reset(&p_hwfn->p_eq->chain);
+}
+
+void qed_eq_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_eq)
+ return;
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain);
+
+ kfree(p_hwfn->p_eq);
+ p_hwfn->p_eq = NULL;
+}
+
+/***************************************************************************
+ * CQE API - manipulate EQ functionality
+ ***************************************************************************/
+static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe,
+ enum protocol_type protocol)
+{
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ /* @@@tmp - it's possible we'll eventually want to handle some
+ * actual commands that can arrive here, but for now this is only
+ * used to complete the ramrod using the echo value on the cqe
+ */
+ return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
+}
+
+int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
+ struct eth_slow_path_rx_cqe *cqe)
+{
+ int rc;
+
+ rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
+ if (rc)
+ DP_NOTICE(p_hwfn,
+ "Failed to handle RXQ CQE [cmd 0x%02x]\n",
+ cqe->ramrod_cmd_id);
+
+ return rc;
+}
+
+/***************************************************************************
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
+void qed_spq_setup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ struct core_db_data *p_db_data;
+ void __iomem *db_addr;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
+ int rc;
+
+ INIT_LIST_HEAD(&p_spq->pending);
+ INIT_LIST_HEAD(&p_spq->completion_pending);
+ INIT_LIST_HEAD(&p_spq->free_pool);
+ INIT_LIST_HEAD(&p_spq->unlimited_pending);
+ spin_lock_init(&p_spq->lock);
+
+ /* SPQ empty pool */
+ p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
+ p_virt = p_spq->p_virt;
+
+ capacity = qed_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
+ DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
+
+ list_add_tail(&p_virt->list, &p_spq->free_pool);
+
+ p_virt++;
+ p_phys += sizeof(struct qed_spq_entry);
+ }
+
+ /* Statistics */
+ p_spq->normal_count = 0;
+ p_spq->comp_count = 0;
+ p_spq->comp_sent_count = 0;
+ p_spq->unlimited_pending_count = 0;
+
+ bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+ p_spq->comp_bitmap_idx = 0;
+
+ /* SPQ cid, cannot fail */
+ qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
+ qed_spq_hw_initialize(p_hwfn, p_spq);
+
+ /* reset the chain itself */
+ qed_chain_reset(&p_spq->chain);
+
+ /* Initialize the address/data of the SPQ doorbell */
+ p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY);
+ p_db_data = &p_spq->db_data;
+ memset(p_db_data, 0, sizeof(*p_db_data));
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+ SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_CORE_SPQ_PROD_CMD);
+ p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+ /* Register the SPQ doorbell with the doorbell recovery mechanism */
+ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
+ p_spq->db_addr_offset);
+ rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data,
+ DB_REC_WIDTH_32B, DB_REC_KERNEL);
+ if (rc)
+ DP_INFO(p_hwfn,
+ "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
+}
+
+int qed_spq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_SINGLE,
+ .intended_use = QED_CHAIN_USE_TO_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .elem_size = sizeof(struct slow_path_element),
+ };
+ struct qed_dev *cdev = p_hwfn->cdev;
+ struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
+ int ret;
+
+ /* SPQ struct */
+ p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
+ if (!p_spq)
+ return -ENOMEM;
+
+ /* SPQ ring */
+ ret = qed_chain_alloc(cdev, &p_spq->chain, &params);
+ if (ret) {
+ DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n");
+ goto spq_chain_alloc_fail;
+ }
+
+ /* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = qed_chain_get_capacity(&p_spq->chain);
+ ret = -ENOMEM;
+
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ capacity * sizeof(struct qed_spq_entry),
+ &p_phys, GFP_KERNEL);
+ if (!p_virt)
+ goto spq_alloc_fail;
+
+ p_spq->p_virt = p_virt;
+ p_spq->p_phys = p_phys;
+ p_hwfn->p_spq = p_spq;
+
+ return 0;
+
+spq_alloc_fail:
+ qed_chain_free(cdev, &p_spq->chain);
+spq_chain_alloc_fail:
+ kfree(p_spq);
+
+ return ret;
+}
+
+void qed_spq_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ void __iomem *db_addr;
+ u32 capacity;
+
+ if (!p_spq)
+ return;
+
+ /* Delete the SPQ doorbell from the doorbell recovery mechanism */
+ db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells +
+ p_spq->db_addr_offset);
+ qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data);
+
+ if (p_spq->p_virt) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ capacity *
+ sizeof(struct qed_spq_entry),
+ p_spq->p_virt, p_spq->p_phys);
+ }
+
+ qed_chain_free(p_hwfn->cdev, &p_spq->chain);
+ kfree(p_spq);
+ p_hwfn->p_spq = NULL;
+}
+
+int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ int rc = 0;
+
+ spin_lock_bh(&p_spq->lock);
+
+ if (list_empty(&p_spq->free_pool)) {
+ p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate an SPQ entry for a pending ramrod\n");
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ p_ent->queue = &p_spq->unlimited_pending;
+ } else {
+ p_ent = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry, list);
+ list_del(&p_ent->list);
+ p_ent->queue = &p_spq->pending;
+ }
+
+ *pp_ent = p_ent;
+
+out_unlock:
+ spin_unlock_bh(&p_spq->lock);
+ return rc;
+}
+
+/* Locked variant; Should be called while the SPQ lock is taken */
+static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent)
+{
+ list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
+}
+
+void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
+{
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+}
+
+/**
+ * qed_spq_add_entry() - Add a new entry to the pending list.
+ * Should be used while lock is being held.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: An entry to add.
+ * @priority: Desired priority.
+ *
+ * Adds an entry to the pending list if there is room (an empty
+ * element is available in the free_pool), or else places the
+ * entry in the unlimited_pending pool.
+ *
+ * Return: zero on success, -EINVAL on invalid @priority.
+ */
+static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent,
+ enum spq_priority priority)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ if (list_empty(&p_spq->free_pool)) {
+ list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
+ p_spq->unlimited_pending_count++;
+
+ return 0;
+ } else {
+ struct qed_spq_entry *p_en2;
+
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry, list);
+ list_del(&p_en2->list);
+
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to overwrite the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+
+ *p_en2 = *p_ent;
+
+ /* EBLOCK is responsible for freeing the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
+ else
+ p_ent->post_ent = p_en2;
+
+ p_ent = p_en2;
+ }
+ }
+
+ /* entry is to be placed in 'pending' queue */
+ switch (priority) {
+ case QED_SPQ_PRIORITY_NORMAL:
+ list_add_tail(&p_ent->list, &p_spq->pending);
+ p_spq->normal_count++;
+ break;
+ case QED_SPQ_PRIORITY_HIGH:
+ list_add(&p_ent->list, &p_spq->pending);
+ p_spq->high_count++;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/***************************************************************************
+ * Accessor
+ ***************************************************************************/
+u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_spq)
+ return 0xffffffff; /* illegal */
+ return p_hwfn->p_spq->cid;
+}
+
+/***************************************************************************
+ * Posting new Ramrods
+ ***************************************************************************/
+static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
+ struct list_head *head, u32 keep_reserve)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ int rc;
+
+ while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
+ !list_empty(head)) {
+ struct qed_spq_entry *p_ent =
+ list_first_entry(head, struct qed_spq_entry, list);
+ list_move_tail(&p_ent->list, &p_spq->completion_pending);
+ p_spq->comp_sent_count++;
+
+ rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
+ if (rc) {
+ list_del(&p_ent->list);
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+
+ while (!list_empty(&p_spq->free_pool)) {
+ if (list_empty(&p_spq->unlimited_pending))
+ break;
+
+ p_ent = list_first_entry(&p_spq->unlimited_pending,
+ struct qed_spq_entry, list);
+ if (!p_ent)
+ return -EINVAL;
+
+ list_del(&p_ent->list);
+
+ qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ }
+
+ return qed_spq_post_list(p_hwfn, &p_spq->pending,
+ SPQ_HIGH_PRI_RESERVE_DEFAULT);
+}
+
+static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent,
+ u8 *fw_return_code)
+{
+ if (!fw_return_code)
+ return;
+
+ if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE ||
+ p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP)
+ *fw_return_code = RDMA_RETURN_OK;
+}
+
+/* Avoid overwriting SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+
+ __set_bit(pos, p_spq->p_comp_bitmap);
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+}
+
+int qed_spq_post(struct qed_hwfn *p_hwfn,
+ struct qed_spq_entry *p_ent, u8 *fw_return_code)
+{
+ int rc = 0;
+ struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
+ bool b_ret_ent = true;
+ bool eblock;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ if (!p_ent) {
+ DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
+ return -EINVAL;
+ }
+
+ if (p_hwfn->cdev->recov_in_prog) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Recovery is in progress. Skip spq post [%s:%02x %s:%02x]\n",
+ qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id,
+ p_ent->elem.hdr.cmd_id),
+ p_ent->elem.hdr.cmd_id,
+ qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id),
+ p_ent->elem.hdr.protocol_id);
+
+ /* Let the flow complete w/o any error handling */
+ qed_spq_recov_set_ret_code(p_ent, fw_return_code);
+ return 0;
+ }
+
+ /* Complete the entry */
+ rc = qed_spq_fill_entry(p_hwfn, p_ent);
+
+ spin_lock_bh(&p_spq->lock);
+
+ /* Check return value after LOCK is taken for cleaner error flow */
+ if (rc)
+ goto spq_post_fail;
+
+ /* Check if entry is in block mode before qed_spq_add_entry,
+ * which might kfree p_ent.
+ */
+ eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK);
+
+ /* Add the request to the pending queue */
+ rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
+ if (rc)
+ goto spq_post_fail;
+
+ rc = qed_spq_pend_post(p_hwfn);
+ if (rc) {
+ /* Since it's possible that pending failed for a different
+ * entry [although unlikely], the failed entry was already
+ * dealt with; No need to return it here.
+ */
+ b_ret_ent = false;
+ goto spq_post_fail;
+ }
+
+ spin_unlock_bh(&p_spq->lock);
+
+ if (eblock) {
+ /* For entries in QED BLOCK mode, the completion code cannot
+ * perform the necessary cleanup - if it did, we couldn't
+ * access p_ent here to see whether it's successful or not.
+ * Thus, after gaining the answer perform the cleanup here.
+ */
+ rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
+ p_ent->queue == &p_spq->unlimited_pending);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
+ kfree(p_ent);
+
+ /* Return the entry which was actually posted */
+ p_ent = p_post_ent;
+ }
+
+ if (rc)
+ goto spq_post_fail2;
+
+ /* return to pool */
+ qed_spq_return_entry(p_hwfn, p_ent);
+ }
+ return rc;
+
+spq_post_fail2:
+ spin_lock_bh(&p_spq->lock);
+ list_del(&p_ent->list);
+ qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);
+
+spq_post_fail:
+ /* return to the free pool */
+ if (b_ret_ent)
+ __qed_spq_return_entry(p_hwfn, p_ent);
+ spin_unlock_bh(&p_spq->lock);
+
+ return rc;
+}
+
+int qed_spq_completion(struct qed_hwfn *p_hwfn,
+ __le16 echo,
+ u8 fw_return_code,
+ union event_ring_data *p_data)
+{
+ struct qed_spq *p_spq;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_spq_entry *tmp;
+ struct qed_spq_entry *found = NULL;
+
+ if (!p_hwfn)
+ return -EINVAL;
+
+ p_spq = p_hwfn->p_spq;
+ if (!p_spq)
+ return -EINVAL;
+
+ spin_lock_bh(&p_spq->lock);
+ list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
+ if (p_ent->elem.hdr.echo == echo) {
+ list_del(&p_ent->list);
+ qed_spq_comp_bmap_update(p_hwfn, echo);
+ p_spq->comp_count++;
+ found = p_ent;
+ break;
+ }
+
+ /* This is relatively uncommon - depends on scenarios
+ * which have multiple per-PF sent ramrods.
+ */
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+ le16_to_cpu(echo),
+ le16_to_cpu(p_ent->elem.hdr.echo));
+ }
+
+ /* Release lock before callback, as callback may post
+ * an additional ramrod.
+ */
+ spin_unlock_bh(&p_spq->lock);
+
+ if (!found) {
+ DP_NOTICE(p_hwfn,
+ "Failed to find an entry this EQE [echo %04x] completes\n",
+ le16_to_cpu(echo));
+ return -EEXIST;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Complete EQE [echo %04x]: func %p cookie %p)\n",
+ le16_to_cpu(echo),
+ p_ent->comp_cb.function, p_ent->comp_cb.cookie);
+ if (found->comp_cb.function)
+ found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
+ fw_return_code);
+ else
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SPQ,
+ "Got a completion without a callback function\n");
+
+ if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list.
+ */
+ qed_spq_return_entry(p_hwfn, found);
+
+ return 0;
+}
+
+#define QED_SPQ_CONSQ_ELEM_SIZE 0x80
+
+int qed_consq_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE,
+ .elem_size = QED_SPQ_CONSQ_ELEM_SIZE,
+ };
+ struct qed_consq *p_consq;
+ int ret;
+
+ /* Allocate ConsQ struct */
+ p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
+ if (!p_consq)
+ return -ENOMEM;
+
+ /* Allocate and initialize ConsQ chain */
+ ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, &params);
+ if (ret) {
+ DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain");
+ goto consq_alloc_fail;
+ }
+
+ p_hwfn->p_consq = p_consq;
+
+ return 0;
+
+consq_alloc_fail:
+ kfree(p_consq);
+
+ return ret;
+}
+
+void qed_consq_setup(struct qed_hwfn *p_hwfn)
+{
+ qed_chain_reset(&p_hwfn->p_consq->chain);
+}
+
+void qed_consq_free(struct qed_hwfn *p_hwfn)
+{
+ if (!p_hwfn->p_consq)
+ return;
+
+ qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain);
+
+ kfree(p_hwfn->p_consq);
+ p_hwfn->p_consq = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644
index 000000000..911509c2b
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -0,0 +1,5305 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
+
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+ return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
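+/* Derive legacy-mode flags from the VF's ACQUIRE request: VFs reporting the
+ * ETH_HSI_VER_NO_PKT_LEN_TUNN fastpath HSI minor use the legacy Rx producer
+ * location, and VFs that did not advertise VFPF_ACQUIRE_CAP_QUEUE_QIDS use
+ * the legacy CID scheme.
+ */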
+static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
+{
+ u8 legacy = 0;
+
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ legacy |= QED_QCID_LEGACY_VF_RX_PROD;
+
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ legacy |= QED_QCID_LEGACY_VF_CID;
+
+ return legacy;
+}
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
+{
+ struct vf_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u8 fp_minor;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_vf->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ETH_ROCE:
+ case QED_PCI_ETH_IWARP:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ qed_sp_destroy_request(p_hwfn, p_ent);
+ return -EINVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR &&
+ fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR,
+ fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+ b_non_malicious)
+ return false;
+
+ return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return NULL;
+ }
+
+ if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+ b_enabled_only, false))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+ __func__, relative_vf_id);
+
+ return vf;
+}
+
+static struct qed_queue_cid *
+qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue)
+{
+ int i;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
+ return p_queue->cids[i].p_cid;
+ }
+
+ return NULL;
+}
+
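+/* Queue-state expectation used when validating VF queue indices: _NA skips
+ * the state check, _ENABLE requires an active CID on the queue zone, and
+ * _DISABLE requires that no matching CID be active.
+ */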
+enum qed_iov_validate_q_mode {
+ QED_IOV_VALIDATE_Q_NA,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ QED_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ u16 qid,
+ enum qed_iov_validate_q_mode mode,
+ bool b_is_tx)
+{
+ int i;
+
+ if (mode == QED_IOV_VALIDATE_Q_NA)
+ return true;
+
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ struct qed_vf_queue_cid *p_qcid;
+
+ p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+ if (!p_qcid->p_cid)
+ continue;
+
+ if (p_qcid->b_is_tx != b_is_tx)
+ continue;
+
+ return mode == QED_IOV_VALIDATE_Q_ENABLE;
+ }
+
+ /* If no valid CID was found, the queue is considered disabled */
+ return mode == QED_IOV_VALIDATE_Q_DISABLE;
+}
+
+static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ u16 rx_qid,
+ enum qed_iov_validate_q_mode mode)
+{
+ if (rx_qid >= p_vf->num_rxqs) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false);
+}
+
+static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ u16 tx_qid,
+ enum qed_iov_validate_q_mode mode)
+{
+ if (tx_qid >= p_vf->num_txqs) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return false;
+ }
+
+ return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true);
+}
+
+static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
+static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_rxqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ false))
+ return true;
+
+ return false;
+}
+
+static bool qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u8 i;
+
+ for (i = 0; i < p_vf->num_txqs; i++)
+ if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i,
+ QED_IOV_VALIDATE_Q_ENABLE,
+ true))
+ return true;
+
+ return false;
+}
+
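+/* Bump the bulletin board version, recompute its CRC and DMA the updated
+ * contents into the VF's bulletin buffer.
+ */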
+static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
+{
+ struct qed_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct qed_dmae_params params;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return -EINVAL;
+
+ if (!p_vf->vf_bulletin)
+ return -EINVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
+ params.dst_vfid = p_vf->abs_vf_id;
+ return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
+
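+/* Snapshot the SR-IOV capability from PCI config space into the per-device
+ * qed_hw_sriov_info, sanity-checking the reported VF counts.
+ */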
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+ struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ pci_read_config_dword(cdev->pdev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ iov->nres,
+ iov->cap,
+ iov->ctrl,
+ iov->total_vfs,
+ iov->initial_vfs,
+ iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+ iov->total_vfs > NUM_OF_VFS(cdev)) {
+ /* This can happen only due to a bug. In that case, set num_vfs to
+ * zero to avoid memory corruption in code that assumes the maximum
+ * number of VFs.
+ */
+ DP_NOTICE(cdev,
+ "IOV: Unexpected number of VFs set: %d; setting num_vfs to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return 0;
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct qed_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "%s called without allocating mem first\n", __func__);
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct qed_bulletin_content) +
+ bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+ vf->vport_id = idx + 1;
+
+ vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ }
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "%s for %d VFs\n", __func__, num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ &p_iov_info->mbx_msg_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ &p_iov_info->mbx_reply_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ &p_iov_info->bulletins_phys,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (u64)p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (u64)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
+
+ return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr);
+
+ if (p_iov_info->p_bulletins)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return 0;
+ }
+
+ p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+ if (!p_sriov)
+ return -ENOMEM;
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_sriov_eqe_event);
+
+ return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ qed_iov_setup_vfdb(p_hwfn);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ qed_iov_free_vfdb(p_hwfn);
+ kfree(p_hwfn->pf_iov_info);
+ }
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int pos;
+ int rc;
+
+ if (is_kdump_kernel())
+ return 0;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ /* Learn the PCI configuration */
+ pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+ return 0;
+ }
+
+ /* Allocate a new struct for IOV information */
+ cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+ if (!cdev->p_iov_info)
+ return -ENOMEM;
+
+ cdev->p_iov_info->pos = pos;
+
+ rc = qed_iov_pci_cfg_info(cdev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!cdev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+ return 0;
+ }
+
+ /* First VF index based on offset is tricky:
+ * - If ARI is supported [likely], offset - (16 - pf_id) would
+ * provide the number for eng0. 2nd engine VFs would begin
+ * after the first engine's VFs.
+ * - If !ARI, VFs would start on the next device, so
+ * offset - (256 - pf_id) would provide the number.
+ * Utilize the fact that (256 - pf_id) is reached only in the
+ * latter case to differentiate between the two.
+ */
+
+ if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+
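+ /* The second engine's VFs are numbered after engine 0's, so for an
+ * engine-1 hwfn shift the index back by the per-engine (BB) VF count.
+ */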
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+ } else {
+ u32 first = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 256;
+
+ cdev->p_iov_info->first_vf_in_pf = first;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ cdev->p_iov_info->first_vf_in_pf);
+
+ return 0;
+}
+
+static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn,
+ int vfid, bool b_fail_malicious)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
+ return false;
+
+ return true;
+}
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+ return _qed_iov_pf_sanity_check(p_hwfn, vfid, true);
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct qed_vf_info *vf;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_QED_SRIOV(cdev))
+ return;
+
+ for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+ qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vfid)
+{
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Set VF masks and configuration - pretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+}
+
+static int
+qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs)
+{
+ u8 current_max = 0;
+ int i;
+
+ /* For AH onward, configuration is per-PF. Find the maximum across
+ * all the currently enabled child VFs, and set the number to that.
+ */
+ if (!QED_IS_BB(p_hwfn->cdev)) {
+ qed_for_each_vf(p_hwfn, i) {
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true);
+ if (!p_vf)
+ continue;
+
+ current_max = max_t(u8, current_max, p_vf->num_sbs);
+ }
+ }
+
+ if (num_sbs > current_max)
+ return qed_mcp_config_vf_msix(p_hwfn, p_ptt,
+ abs_vf_id, num_sbs);
+
+ return 0;
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ int rc;
+
+ /* It's possible the VF was previously considered malicious -
+ * clear the indication even if we're only going to disable the VF.
+ */
+ vf->b_malicious = false;
+
+ if (vf->to_disable)
+ return 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n",
+ vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+ vf->abs_vf_id, vf->num_sbs);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ * qed_iov_config_perm_table() - Configure the permission zone table.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window for writing the registers.
+ * @vf: VF info data.
+ * @enable: The actual permission for this VF.
+ *
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
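+ /* Entry format is {Valid, VF[7:0]}; BIT(8) is the Valid bit */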
+ val = enable ? (vf->abs_vf_id | BIT(8)) : 0;
+ qed_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u16 num_rx_queues)
+{
+ struct qed_igu_block *p_block;
+ struct cau_sb_entry sb_entry;
+ int qid = 0;
+ u32 val = 0;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
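+ /* The same CAM line template is reused for every SB; only the
+ * vector number changes per iteration.
+ */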
+ for (qid = 0; qid < num_rx_queues; qid++) {
+ p_block = qed_get_igu_free_sb(p_hwfn, false);
+ vf->igu_sbs[qid] = p_block->igu_sb_id;
+ p_block->status &= ~QED_IGU_STATUS_FREE;
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY +
+ sizeof(u32) * p_block->igu_sb_id, val);
+
+ /* Configure the IGU SB in CAU, which was marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id, vf->abs_vf_id, 1);
+
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ p_block->igu_sb_id * sizeof(u64), 2, NULL);
+ }
+
+ vf->num_sbs = (u8)num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_iov_vf_init_params *p_params)
+{
+ struct qed_mcp_link_capabilities link_caps;
+ struct qed_mcp_link_params link_params;
+ struct qed_mcp_link_state link_state;
+ u8 num_of_vf_available_chains = 0;
+ struct qed_vf_info *vf = NULL;
+ u16 qid, num_irqs;
+ int rc = 0;
+ u32 cids;
+ u8 i;
+
+ vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, "VF[%d] is already active.\n",
+ p_params->rel_vf_id);
+ return -EINVAL;
+ }
+
+ /* Perform sanity checking on the requested queue_id */
+ for (i = 0; i < p_params->num_queues; i++) {
+ u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE);
+ u16 max_vf_qzone = min_vf_qzone +
+ FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1;
+
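+ /* VF queue zones follow the PF's, so valid Rx qids fall in
+ * [PF_L2_QUE, PF_L2_QUE + VF_L2_QUE - 1].
+ */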
+ qid = p_params->req_rx_queue[i];
+ if (qid < min_vf_qzone || qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn,
+ "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
+ qid,
+ p_params->rel_vf_id,
+ min_vf_qzone, max_vf_qzone);
+ return -EINVAL;
+ }
+
+ qid = p_params->req_tx_queue[i];
+ if (qid > max_vf_qzone) {
+ DP_NOTICE(p_hwfn,
+ "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
+ qid, p_params->rel_vf_id, max_vf_qzone);
+ return -EINVAL;
+ }
+
+ /* If client *really* wants, Tx qid can be shared with PF */
+ if (qid < min_vf_qzone)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
+ p_params->rel_vf_id, qid, i);
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+ vf->relative_vf_id, p_params->num_queues, (u16)cids);
+ num_irqs = min_t(u16, p_params->num_queues, ((u16)cids));
+
+ num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf, num_irqs);
+ if (!num_of_vf_available_chains) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return -ENOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_available_chains;
+ vf->num_txqs = num_of_vf_available_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ struct qed_vf_queue *p_queue = &vf->vf_queues[i];
+
+ p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+ p_queue->fw_tx_qid = p_params->req_tx_queue[i];
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i],
+ p_queue->fw_rx_qid, p_queue->fw_tx_qid);
+ }
+
+ /* Update the link configuration in bulletin */
+ memcpy(&link_params, qed_mcp_get_link_params(p_hwfn),
+ sizeof(link_params));
+ memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state));
+ memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn),
+ sizeof(link_caps));
+ qed_iov_set_link(p_hwfn, p_params->rel_vf_id,
+ &link_params, &link_state, &link_caps);
+
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (!rc) {
+ vf->b_init = true;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (vf->bulletin.p_virt)
+ memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+ memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+ qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ /* Forget the VF's acquisition message */
+ memset(&vf->acquire, 0, sizeof(vf->acquire));
+
+ /* Disabling interrupts and resetting the permission table was done
+ * during vf-close; however, we could get here without going through
+ * vf_close.
+ */
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs--;
+ }
+
+ return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+ return;
+ }
+
+ total_length += tlv->length;
+
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct qed_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
+ params.dst_vfid = eng_vf_id;
+
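+ /* Copy the bulk of the reply first; the leading 64 bits holding the
+ * response header are copied only after the channel is marked ready,
+ * so the header lands last.
+ */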
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ /* Once PF copies the rc to the VF, the latter can continue
+ * and send an additional message. So we have to make sure the
+ * channel would be re-set to ready prior to that.
+ */
+ REG_WR(p_hwfn,
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+ enum qed_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case QED_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case QED_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case QED_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case QED_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case QED_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case QED_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx,
+ u8 status,
+ u16 tlvs_mask, u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & BIT(i)))
+ continue;
+
+ resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+ qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & BIT(i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - vport_update response: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+static struct
+qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return NULL;
+
+ return &vf->p_vf_info;
+}
+
+static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+ if (!vf_info)
+ return;
+
+ /* Clear the VF mac */
+ eth_zero_addr(vf_info->mac);
+
+ vf_info->rx_accept_mode = 0;
+ vf_info->tx_accept_mode = 0;
+}
+
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u32 i, j;
+
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested less resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+ for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+ if (!p_queue->cids[j].p_cid)
+ continue;
+
+ qed_eth_queue_cid_release(p_hwfn,
+ p_queue->cids[j].p_cid);
+ p_queue->cids[j].p_cid = NULL;
+ }
+ }
+
+ memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
+ qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+/* Returns either 0, or log2 of the VF doorbell BAR size */
+static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+ if (val)
+ return val + 11;
+ return 0;
+}
+
+static void
+qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+ u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) -
+ qed_db_addr_vf(0, DQ_DEMS_LEGACY);
+ u32 bar_size;
+
+ p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons);
+
+ /* If VF didn't bother asking for QIDs then don't bother limiting
+ * number of CIDs. The VF doesn't care about the number, and this
+ * has the likely result of causing an additional acquisition.
+ */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount
+ * that would make sure doorbells for all CIDs fall within the bar.
+ * If it doesn't, make sure regview window is sufficient.
+ */
+ if (p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+ bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ if (bar_size)
+ bar_size = 1 << bar_size;
+
+ if (p_hwfn->cdev->num_hwfns > 1)
+ bar_size /= 2;
+ } else {
+ bar_size = PXP_VF_BAR0_DQ_LENGTH;
+ }
+
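+ /* Each CID consumes db_size doorbell bytes; only cap num_cids when
+ * the BAR can't cover 256 doorbells, since num_cids is a u8 anyway.
+ */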
+ if (bar_size / db_size < 256)
+ p_resp->num_cids = min_t(u8, p_resp->num_cids,
+ (u8)(bar_size / db_size));
+}
+
+static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ u8 i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern vfs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = i;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters ||
+ p_resp->num_cids < p_req->num_cids) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters,
+ p_resp->num_mc_filters,
+ p_req->num_cids, p_resp->num_cids);
+
+ /* Some legacy OSes are incapable of correctly handling this
+ * failure.
+ */
+ if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+ (p_vf->acquire.vfdev_info.os_type ==
+ VFPF_ACQUIRE_OS_WINDOWS))
+ return PFVF_STATUS_SUCCESS;
+
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
+static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ struct pf_vf_resc *resc = &resp->resc;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* Write the PF version so that the VF knows which version is
+ * supported - it might be overridden later. This guarantees that a
+ * VF can recognize a legacy PF by the lack of versions in the reply.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ if (vf->state != VF_FREE && vf->state != VF_STOPPED) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+ vf->abs_vf_id, vf->state);
+ goto out;
+ }
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] is pre-fastpath HSI\n",
+ vf->abs_vf_id);
+ p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ goto out;
+ }
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->cdev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support 100g\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+
+ /* Store the acquire message */
+ memcpy(&vf->acquire, req, sizeof(vf->acquire));
+
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+
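+ /* Remember where the VF keeps its bulletin copy and agree on the
+ * smaller of the two bulletin sizes.
+ */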
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+ pfdev_info->db_size = 0;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ /* Share our ability to use multiple queue-ids only with VFs
+ * that request it.
+ */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+ /* Share the sizes of the bars with VF */
+ resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt);
+
+ qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+
+ memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+ /* Incorrect when legacy, but that doesn't matter as legacy VFs
+ * don't read this field.
+ */
+ pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
+ pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+ qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+ pfdev_info->dev_type = p_hwfn->cdev->type;
+ pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
+
+ /* Start the VF in FW */
+ rc = qed_sp_vf_start(p_hwfn, vf);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Fill agreed size of bulletin board in response */
+ resp->bulletin_size = vf->bulletin.size;
+ qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+ vf->abs_vf_id,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.capabilities,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters);
+ vf->state = VF_ACQUIRED;
+
+ /* Prepare Response */
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool val)
+{
+ struct qed_sp_vport_update_params params;
+ int rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+ if (!rc) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_filter_ucast filter;
+ int rc = 0;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = QED_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = QED_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to configure VLAN [%04x] to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+
+ if ((events & BIT(VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+ struct qed_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return -EINVAL;
+
+ if ((events & BIT(MAC_ADDR_FORCED)) ||
+ p_vf->p_vf_info.is_trusted_configured) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_MAC;
+ filter.opcode = QED_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+ if (p_vf->p_vf_info.is_trusted_configured)
+ p_vf->configured_features |=
+ BIT(VFPF_BULLETIN_MAC_ADDR);
+ else
+ p_vf->configured_features |=
+ BIT(MAC_ADDR_FORCED);
+ }
+
+ if (events & BIT(VLAN_ADDR_FORCED)) {
+ struct qed_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+ QED_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ memset(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ? 1
+ : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = qed_sp_vport_update(p_hwfn,
+ &vport_update,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ struct qed_vf_queue *p_queue = &p_vf->vf_queues[i];
+ struct qed_queue_cid *p_cid = NULL;
+
+ /* There can be at most 1 Rx queue on a qzone. Find it */
+ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
+ if (!p_cid)
+ continue;
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn,
+ (void **)&p_cid,
+ 1, 0, 1,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send Rx update fo queue[0x%04x]\n",
+ p_cid->rel.queue_id);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_start_params params = { 0 };
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ int rc;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id], vf->abs_vf_id, 1);
+ }
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration the configuration forced by the hypervisor;
+ * if none is configured, use the values supplied by the VF [for old
+ * VFs this is still fine, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+
+ /* Non trusted VFs should enable control frame filtering */
+ params.check_mac = !vf->p_vf_info.is_trusted_configured;
+
+ rc = qed_sp_eth_vport_start(p_hwfn, &params);
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "%s returned error %d\n", __func__, rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+ __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ if ((qed_iov_validate_active_rxq(p_hwfn, vf)) ||
+ (qed_iov_validate_active_txq(p_hwfn, vf))) {
+ vf->b_malicious = true;
+ DP_NOTICE(p_hwfn,
+ "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_MALICIOUS;
+ goto out;
+ }
+
+ rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc) {
+ DP_ERR(p_hwfn, "%s returned error %d\n",
+ __func__, rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf,
+ u8 status, bool b_legacy)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
+ req = &mbx->req_virt->start_rxq;
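+ /* Point the VF at its Rx producer inside the MSDM zone of VF BAR0 */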
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool b_is_tx)
+{
+ struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Search for the qid if the VF published that it's going to provide it */
+ if (!(p_vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
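+ /* Legacy VFs never provide a qid - use the fixed per-queue-zone
+ * indexes instead.
+ */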
+ if (b_is_tx)
+ return QED_IOV_LEGACY_QID_TX;
+ else
+ return QED_IOV_LEGACY_QID_RX;
+ }
+
+ p_qid_tlv = (struct vfpf_qid_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ CHANNEL_TLV_QID);
+ if (!p_qid_tlv) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%2x]: Failed to provide qid\n",
+ p_vf->relative_vf_id);
+
+ return QED_IOV_QID_INVALID;
+ }
+
+ if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Provided qid out-of-bounds %02x\n",
+ p_vf->relative_vf_id, p_qid_tlv->qid);
+ return QED_IOV_QID_INVALID;
+ }
+
+ return p_qid_tlv->qid;
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_queue_cid_vf_params vf_params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ u8 qid_usage_idx, vf_legacy = 0;
+ struct vfpf_start_rxq_tlv *req;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ struct qed_sb_info sb_dummy;
+ int rc;
+
+ req = &mbx->req_virt->start_rxq;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+ QED_IOV_VALIDATE_Q_DISABLE) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ p_queue = &vf->vf_queues[req->rx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = qed_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
+ memset(&params, 0, sizeof(params));
+ params.queue_id = p_queue->fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.stats_id = vf->abs_vf_id + 0x10;
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ memset(&sb_dummy, 0, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
+
+ memset(&vf_params, 0, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->rx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+ p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, true, &vf_params);
+ if (!p_cid)
+ goto out;
+
+ /* Legacy VFs have their producers in a different location, which
+ * they calculate on their own and clean prior to this.
+ */
+ if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
+ qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY +
+ SEM_FAST_REG_INT_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+ req->rx_qid), 0);
+
+ rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr, req->cqe_pbl_size);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = false;
+ status = PFVF_STATUS_SUCCESS;
+ vf->num_active_rxqs++;
+ }
+
+out:
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+ !!(vf_legacy &
+ QED_QCID_LEGACY_VF_RX_PROD));
+}
+
+static void
+qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+ struct qed_tunnel_info *p_tun,
+ u16 tunn_feature_mask)
+{
+ p_resp->tunn_feature_mask = tunn_feature_mask;
+ p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+ p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+ p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+ p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+ p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
+ p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+ p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+ p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+ p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+ p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+ p_resp->geneve_udp_port = p_tun->geneve_port.port;
+ p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ enum qed_tunn_mode mask, u8 tun_cls)
+{
+ if (p_req->tun_mode_update_mask & BIT(mask)) {
+ p_tun->b_update_mode = true;
+
+ if (p_req->tunn_mode & BIT(mask))
+ p_tun->b_mode_enabled = true;
+ }
+
+ p_tun->tun_cls = tun_cls;
+}
+
+static void
+qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_tun,
+ struct qed_tunn_update_udp_port *p_port,
+ enum qed_tunn_mode mask,
+ u8 tun_cls, u8 update_port, u16 port)
+{
+ if (update_port) {
+ p_port->b_update_port = true;
+ p_port->port = port;
+ }
+
+ __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+ bool b_update_requested = false;
+
+ if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+ p_req->update_geneve_port || p_req->update_vxlan_port)
+ b_update_requested = true;
+
+ return b_update_requested;
+}
+
+static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc)
+{
+ if (tun->b_update_mode && !tun->b_mode_enabled) {
+ tun->b_update_mode = false;
+ *rc = -EINVAL;
+ }
+}
+
+static int
+qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn,
+ u16 *tun_features, bool *update,
+ struct qed_tunnel_info *tun_src)
+{
+ struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth;
+ struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel;
+ u16 bultn_vxlan_port, bultn_geneve_port;
+ void *cookie = p_hwfn->cdev->ops_cookie;
+ int i, rc = 0;
+
+ *tun_features = p_hwfn->cdev->tunn_feature_mask;
+ bultn_vxlan_port = tun->vxlan_port.port;
+ bultn_geneve_port = tun->geneve_port.port;
+ qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc);
+ qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc);
+
+ if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) &&
+ (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN ||
+ tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) {
+ tun_src->b_update_rx_cls = false;
+ tun_src->b_update_tx_cls = false;
+ rc = -EINVAL;
+ }
+
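+ /* Propagate a UDP port update only if it changes the currently
+ * configured port.
+ */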
+ if (tun_src->vxlan_port.b_update_port) {
+ if (tun_src->vxlan_port.port == tun->vxlan_port.port) {
+ tun_src->vxlan_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_vxlan_port = tun_src->vxlan_port.port;
+ }
+ }
+
+ if (tun_src->geneve_port.b_update_port) {
+ if (tun_src->geneve_port.port == tun->geneve_port.port) {
+ tun_src->geneve_port.b_update_port = false;
+ } else {
+ *update = true;
+ bultn_geneve_port = tun_src->geneve_port.port;
+ }
+ }
+
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port,
+ bultn_geneve_port);
+ }
+
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ bool b_update_required = false;
+ struct qed_tunnel_info tunn;
+ u16 tunn_feature_mask = 0;
+ int i, rc = 0;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ memset(&tunn, 0, sizeof(tunn));
+ p_req = &mbx->req_virt->tunn_param_update;
+
+ if (!qed_iov_pf_validate_tunn_param(p_req)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No tunnel update requested by VF\n");
+ status = PFVF_STATUS_FAILURE;
+ goto send_resp;
+ }
+
+ tunn.b_update_rx_cls = p_req->update_tun_cls;
+ tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+ qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+ QED_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+ p_req->update_vxlan_port,
+ p_req->vxlan_port);
+ qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+ QED_MODE_L2GENEVE_TUNN,
+ p_req->l2geneve_clss,
+ p_req->update_geneve_port,
+ p_req->geneve_port);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ p_req->ipgeneve_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+ QED_MODE_L2GRE_TUNN, p_req->l2gre_clss);
+ __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+ QED_MODE_IPGRE_TUNN, p_req->ipgre_clss);
+
+ /* If the PF modifies the VF's request, it should still return an
+ * error in case of a partial or modified configuration, as opposed
+ * to the requested one.
+ */
+ rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask,
+ &b_update_required, &tunn);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ /* Does the QED client want to update anything? */
+ if (b_update_required) {
+ u16 geneve_port;
+
+ rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ geneve_port = p_tun->geneve_port.port;
+ qed_for_each_vf(p_hwfn, i) {
+ qed_iov_bulletin_set_udp_ports(p_hwfn, i,
+ p_tun->vxlan_port.port,
+ geneve_port);
+ }
+ }
+
+send_resp:
+ p_resp = qed_add_tlv(p_hwfn, &mbx->offset,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+ qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ u32 cid, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ bool b_legacy = false;
+ u16 length;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ /* Taking a bigger struct instead of adding a TLV to the list was a
+ * mistake, but one which we're now stuck with, as some older
+ * clients assume the size of the previous response.
+ */
+ if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+ ETH_HSI_VER_NO_PKT_LEN_TUNN)
+ b_legacy = true;
+
+ if (!b_legacy)
+ length = sizeof(*p_tlv);
+ else
+ length = sizeof(struct pfvf_def_resp_tlv);
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+ p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_queue_cid_vf_params vf_params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct vfpf_start_txq_tlv *req;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ struct qed_sb_info sb_dummy;
+ u8 qid_usage_idx, vf_legacy;
+ u32 cid = 0;
+ int rc;
+ u16 pq;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+ QED_IOV_VALIDATE_Q_NA) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ p_queue = &vf->vf_queues[req->tx_qid];
+ if (p_queue->cids[qid_usage_idx].p_cid)
+ goto out;
+
+ vf_legacy = qed_vf_calculate_legacy(vf);
+
+ /* Acquire a new queue-cid */
+ params.queue_id = p_queue->fw_tx_qid;
+ params.vport_id = vf->vport_id;
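+ /* The 0x10 offset presumably skips statistics counters reserved for
+ * the PFs, giving each VF its own counter beyond them.
+ */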
+ params.stats_id = vf->abs_vf_id + 0x10;
+
+ /* Since IGU index is passed via sb_info, construct a dummy one */
+ memset(&sb_dummy, 0, sizeof(sb_dummy));
+ sb_dummy.igu_sb_id = req->hw_sb;
+ params.p_sb = &sb_dummy;
+ params.sb_idx = req->sb_index;
+
+ memset(&vf_params, 0, sizeof(vf_params));
+ vf_params.vfid = vf->relative_vf_id;
+ vf_params.vf_qid = (u8)req->tx_qid;
+ vf_params.vf_legacy = vf_legacy;
+ vf_params.qid_usage_idx = qid_usage_idx;
+
+ p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+ &params, false, &vf_params);
+ if (!p_cid)
+ goto out;
+
+ pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
+ rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
+ req->pbl_addr, req->pbl_size, pq);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ qed_eth_queue_cid_release(p_hwfn, p_cid);
+ } else {
+ status = PFVF_STATUS_SUCCESS;
+ p_queue->cids[qid_usage_idx].p_cid = p_cid;
+ p_queue->cids[qid_usage_idx].b_is_tx = true;
+ cid = p_cid->cid;
+ }
+
+out:
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ u16 rxq_id,
+ u8 qid_usage_idx, bool cqe_completion)
+{
+ struct qed_vf_queue *p_queue;
+ int rc = 0;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+ vf->relative_vf_id, rxq_id, qid_usage_idx);
+ return -EINVAL;
+ }
+
+ p_queue = &vf->vf_queues[rxq_id];
+
+ /* We've validated the index and the existence of the active RXQ -
+ * now we need to make sure that it's using the correct qid.
+ */
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ p_queue->cids[qid_usage_idx].b_is_tx) {
+ struct qed_queue_cid *p_cid;
+
+ p_cid = qed_iov_get_vf_rx_queue_cid(p_queue);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+ vf->relative_vf_id,
+ rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx);
+ return -EINVAL;
+ }
+
+ /* Now that we know we have a valid Rx-queue - close it */
+ rc = qed_eth_rx_queue_stop(p_hwfn,
+ p_queue->cids[qid_usage_idx].p_cid,
+ false, cqe_completion);
+ if (rc)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = NULL;
+ vf->num_active_rxqs--;
+
+ return 0;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ u16 txq_id, u8 qid_usage_idx)
+{
+ struct qed_vf_queue *p_queue;
+ int rc = 0;
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA))
+ return -EINVAL;
+
+ p_queue = &vf->vf_queues[txq_id];
+ if (!p_queue->cids[qid_usage_idx].p_cid ||
+ !p_queue->cids[qid_usage_idx].b_is_tx)
+ return -EINVAL;
+
+ rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid);
+ if (rc)
+ return rc;
+
+ p_queue->cids[qid_usage_idx].p_cid = NULL;
+ return 0;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct vfpf_stop_rxqs_tlv *req;
+ u8 qid_usage_idx;
+ int rc;
+
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ if (req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Rx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ qid_usage_idx, req->cqe_completion);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct vfpf_stop_txqs_tlv *req;
+ u8 qid_usage_idx;
+ int rc;
+
+ /* There has never been an official driver that used this interface
+ * for stopping multiple queues, and it is now considered deprecated.
+ * Validate this isn't used here.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ if (req->num_txqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Odd; VF[%d] tried stopping multiple Tx queues\n",
+ vf->relative_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* Find which qid-index is associated with the queue */
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx);
+ if (!rc)
+ status = PFVF_STATUS_SUCCESS;
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF];
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ u8 qid_usage_idx;
+ int rc;
+ u8 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false);
+ if (qid_usage_idx == QED_IOV_QID_INVALID)
+ goto out;
+
+ /* There shouldn't exist a VF that uses queue-qids yet uses this
+ * API with multiple Rx queues. Validate this.
+ */
+ if ((vf->acquire.vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] supports QIDs but sends multiple queues\n",
+ vf->relative_vf_id);
+ goto out;
+ }
+
+ /* Validate inputs - for the legacy case this is still true since
+ * qid_usage_idx for each Rx queue would be LEGACY_QID_RX.
+ */
+ for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+ if (!qed_iov_validate_rxq(p_hwfn, vf, i,
+ QED_IOV_VALIDATE_Q_NA) ||
+ !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+ vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+ vf->relative_vf_id, req->rx_qid,
+ req->num_rxqs);
+ goto out;
+ }
+ }
+
+ /* Prepare the handlers */
+ for (i = 0; i < req->num_rxqs; i++) {
+ u16 qid = req->rx_qid + i;
+
+ handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
+ }
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+ req->num_rxqs,
+ complete_cqe_flg,
+ complete_event_flg,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc)
+ goto out;
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+ return NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Extended tlv type %d, length %d found\n",
+ p_tlv->type, p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
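+ /* Make sure the newly reached TLV fits entirely within the shared
+ * buffer before it is examined on the next iteration.
+ */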
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, "TLVs have overrun the buffer size\n");
+ return NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ memcpy(p_data->bins, p_mcast_tlv->bins,
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_rss_params *p_rss,
+ struct qed_iov_vf_mbx *p_mbx,
+ u16 *tlvs_mask, u16 *tlvs_accepted)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ bool b_reject = false;
+ u16 table_size;
+ u16 i, q_idx;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = NULL;
+ return;
+ }
+
+ memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+ p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+ table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
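+ /* Translate each VF-relative Rx queue index in the indirection table
+ * into the PF's queue-cid handle; reject the whole RSS TLV if any
+ * entry references an invalid queue.
+ */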
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_cid;
+
+ q_idx = p_rss_tlv->rss_ind_table[i];
+ if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+ vf->relative_vf_id, q_idx);
+ b_reject = true;
+ goto out;
+ }
+
+ p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
+ p_rss->rss_ind_table[i] = p_cid;
+ }
+
+ p_data->rss_params = p_rss;
+out:
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+ if (!b_reject)
+ *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_sge_tpa_params *p_sge_tpa,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = NULL;
+ return;
+ }
+
+ memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn,
+ u8 vfid,
+ struct qed_sp_vport_update_params *params,
+ u16 *tlvs)
+{
+ u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+ struct qed_filter_accept_flags *flags = &params->accept_flags;
+ struct qed_public_vf_info *vf_info;
+ u16 tlv_mask;
+
+ tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) |
+ BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN);
+
+ /* Untrusted VFs can't even be trusted to know that fact.
+ * Simply indicate everything is configured fine, and trace
+ * configuration 'behind their back'.
+ */
+ if (!(*tlvs & tlv_mask))
+ return 0;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
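+ /* Cache the requested accept modes; for a VF that isn't configured
+ * as trusted, strip the unmatched unicast/multicast accept bits
+ * before the configuration is applied.
+ */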
+ if (flags->update_rx_mode_config) {
+ vf_info->rx_accept_mode = flags->rx_accept_filter;
+ if (!vf_info->is_trusted_configured)
+ flags->rx_accept_filter &= ~mask;
+ }
+
+ if (flags->update_tx_mode_config) {
+ vf_info->tx_accept_mode = flags->tx_accept_filter;
+ if (!vf_info->is_trusted_configured)
+ flags->tx_accept_filter &= ~mask;
+ }
+
+ if (params->update_accept_any_vlan_flg) {
+ vf_info->accept_any_vlan = params->accept_any_vlan;
+
+ if (vf_info->forced_vlan && !vf_info->is_trusted_configured)
+ params->accept_any_vlan = false;
+ }
+
+ return 0;
+}
+
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_rss_params *p_rss_params = NULL;
+ struct qed_sp_vport_update_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct qed_sge_tpa_params sge_tpa_params;
+ u16 tlvs_mask = 0, tlvs_accepted = 0;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 length;
+ int rc;
+
+ /* Validate that the VF has a vport instance to update */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+ p_rss_params = vzalloc(sizeof(*p_rss_params));
+ if (!p_rss_params) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = NULL;
+
+ /* Search the extended TLV list and update the values requested
+ * by the VF in struct qed_sp_vport_update_params.
+ */
+ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ tlvs_accepted = tlvs_mask;
+
+ /* Some of the extended TLVs need to be validated first; in that case,
+ * they can update the mask without updating the accepted mask [so that
+ * the PF can communicate to the VF that it has rejected the request].
+ */
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+ mbx, &tlvs_mask, &tlvs_accepted);
+
+ if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id,
+ &params, &tlvs_accepted)) {
+ tlvs_accepted = 0;
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ if (!tlvs_accepted) {
+ if (tlvs_mask)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Upper-layer prevents VF vport configuration\n");
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ vfree(p_rss_params);
+ length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_accepted);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing vlan\n",
+ p_vf->relative_vf_id);
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))
+ return 0;
+
+ if (p_params->opcode == QED_FILTER_ADD ||
+ p_params->opcode == QED_FILTER_REPLACE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d vlan filters\n",
+ p_vf->relative_vf_id,
+ QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* If we're in forced-mode, we don't allow any change */
+ if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))
+ return 0;
+
+ /* Don't keep track of shadow copy since we don't intend to restore. */
+ if (p_vf->p_vf_info.is_trusted_configured)
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(p_vf->shadow_config.macs[i],
+ p_params->mac)) {
+ eth_zero_addr(p_vf->shadow_config.macs[i]);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "MAC isn't configured\n");
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
+ eth_zero_addr(p_vf->shadow_config.macs[i]);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != QED_FILTER_ADD &&
+ p_params->opcode != QED_FILTER_REPLACE)
+ return 0;
+
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
+ ether_addr_copy(p_vf->shadow_config.macs[i],
+ p_params->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int rc = 0;
+
+ if (p_params->type == QED_FILTER_MAC) {
+ rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc)
+ return rc;
+ }
+
+ if (p_params->type == QED_FILTER_VLAN)
+ rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
+static int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
+{
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ /* No real decision to make; Store the configured MAC */
+ if (params->type == QED_FILTER_MAC ||
+ params->type == QED_FILTER_MAC_VLAN) {
+ ether_addr_copy(vf->mac, params->mac);
+
+ if (vf->is_trusted_configured) {
+ qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid);
+
+ /* Update and post the bulletin again */
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ }
+
+ return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_filter_ucast params;
+ int rc;
+
+ /* Prepare the unicast filter params */
+ memset(&params, 0, sizeof(struct qed_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum qed_filter_opcode)req->opcode;
+ params.type = (enum qed_filter_ucast_type)req->type;
+
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ memcpy(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac, params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Determine whether the unicast filter is acceptable to the PF */
+ if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_VLAN ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == QED_FILTER_ADD ||
+ params.opcode == QED_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_MAC ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+ (params.opcode != QED_FILTER_ADD &&
+ params.opcode != QED_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, status);
+}
+
+static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_read_coal_resp_tlv *p_resp;
+ struct vfpf_read_coal_req_tlv *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct qed_vf_queue *p_queue;
+ struct qed_queue_cid *p_cid;
+ u16 coal = 0, qid, i;
+ bool b_is_rx;
+ int rc = 0;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+ req = &mbx->req_virt->read_coal_req;
+
+ qid = req->qid;
+ b_is_rx = req->is_rx ? true : false;
+
+ if (b_is_rx) {
+ if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
+
+ p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+ rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc)
+ goto send_resp;
+ } else {
+ if (!qed_iov_validate_txq(p_hwfn, p_vf, qid,
+ QED_IOV_VALIDATE_Q_ENABLE)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ p_vf->abs_vf_id, qid);
+ goto send_resp;
+ }
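+ /* A Tx queue may occupy any of the per-queue-zone cids; report the
+ * coalescing value of the first active Tx cid that is found.
+ */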
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ p_queue = &p_vf->vf_queues[qid];
+ if ((!p_queue->cids[i].p_cid) ||
+ (!p_queue->cids[i].b_is_tx))
+ continue;
+
+ p_cid = p_queue->cids[i].p_cid;
+
+ rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+ if (rc)
+ goto send_resp;
+ break;
+ }
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+ p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ,
+ sizeof(*p_resp));
+ p_resp->coal = coal;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_coalesce *req;
+ u8 status = PFVF_STATUS_FAILURE;
+ struct qed_queue_cid *p_cid;
+ u16 rx_coal, tx_coal;
+ int rc = 0, i;
+ u16 qid;
+
+ req = &mbx->req_virt->update_coalesce;
+
+ rx_coal = req->rx_coal;
+ tx_coal = req->tx_coal;
+ qid = req->qid;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, qid,
+ QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Invalid Rx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, qid,
+ QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d]: Invalid Tx queue_id = %d\n",
+ vf->abs_vf_id, qid);
+ goto out;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+ vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+ if (rx_coal) {
+ p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+ rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Unable to set rx queue = %d coalesce\n",
+ vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+ goto out;
+ }
+ vf->rx_coal = rx_coal;
+ }
+
+ if (tx_coal) {
+ struct qed_vf_queue *p_queue = &vf->vf_queues[qid];
+
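+ /* Apply the new Tx coalescing value to every active Tx cid within
+ * this queue-zone.
+ */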
+ for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+ if (!p_queue->cids[i].p_cid)
+ continue;
+
+ if (!p_queue->cids[i].b_is_tx)
+ continue;
+
+ rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+ p_queue->cids[i].p_cid);
+
+ if (rc) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: Unable to set tx queue coalesce\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+ }
+ vf->tx_coal = tx_coal;
+ }
+
+ status = PFVF_STATUS_SUCCESS;
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
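+ /* Pretend to be the VF so the DORQ usage counter is read on behalf
+ * of the VF, then restore the PF's fid once polling is done.
+ */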
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ msleep(20);
+ }
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+ u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+ u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+ u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+ u8 port_id, tc, tc_id = 0, voq = 0;
+ int cnt;
+
+ memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
+ memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
+
+ /* Read initial consumers & producers */
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC;
+ voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+ cons[voq] = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ prod_voq0_addr + voq * 0x40);
+ distance[voq] = prod - cons[voq];
+ }
+ }
+
+ /* Wait for consumers to pass the producers */
+ port_id = 0;
+ tc = 0;
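+ /* port_id and tc are deliberately not reset between polling rounds,
+ * so each pass resumes at the VOQ that has not drained yet.
+ */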
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc : PURE_LB_TC;
+ voq = VOQ(port_id,
+ tc_id, max_phys_tcs_per_port);
+ tmp = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ if (distance[voq] > tmp - cons[voq])
+ break;
+ }
+
+ if (tc == max_phys_tcs_per_port + 1)
+ tc = 0;
+ else
+ break;
+ }
+
+ if (port_id == max_ports_per_engine)
+ break;
+
+ msleep(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n",
+ p_vf->abs_vf_id, (int)voq);
+
+ DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n",
+ (int)voq, (int)port_id, (int)tc_id);
+
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int rc;
+
+ rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct qed_vf_info *p_vf;
+ int rc = 0;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return 0;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ goto cleanup;
+
+ rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* Workaround to make VF-PF channel ready, as FW
+ * doesn't do that as a part of FLR.
+ */
+ REG_WR(p_hwfn,
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, vfid), 1);
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
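+ /* Note the ACK bitmap is indexed by the absolute VF id, while the
+ * pending-FLR bitmap is indexed by the relative one.
+ */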
+ ack_vfs[vfid / 32] |= BIT((vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_vf->vf_mbx.b_pending_msg = false;
+ }
+
+ return rc;
+}
+
+static int
+qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ int rc = 0;
+ u16 i;
+
+ memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ msleep(100);
+
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+ qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ bool found = false;
+ u16 i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+ return false;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+ /* No need to lock here, since pending_flr should
+ * only change here and before ACKing the MFW. Since
+ * the MFW will not trigger an additional attention for
+ * a VF FLR until it has been ACKed, we're safe.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+static int qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return -EINVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+ return 0;
+}
+
+static int
+qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct vfpf_bulletin_update_mac_tlv *p_req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
+
+ if (!p_vf->p_vf_info.is_trusted_configured) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Blocking bulletin update request from untrusted VF[%d]\n",
+ p_vf->abs_vf_id);
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ rc = -EINVAL;
+ goto send_status;
+ }
+
+ p_req = &mbx->req_virt->bulletin_update_mac;
+ ether_addr_copy(p_bulletin->mac, p_req->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Updated bulletin of VF[%d] with requested MAC[%pM]\n",
+ p_vf->abs_vf_id, p_req->mac);
+
+send_status:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ sizeof(struct pfvf_def_resp_tlv), status);
+ return rc;
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid)
+{
+ struct qed_iov_vf_mbx *mbx;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* qed_iov_process_mbx_request */
+ if (!mbx->b_pending_msg) {
+ DP_NOTICE(p_hwfn,
+ "VF[%02x]: Trying to process mailbox message when none is pending\n",
+ p_vf->abs_vf_id);
+ return;
+ }
+ mbx->b_pending_msg = false;
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Processing mailbox message [type %04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+ /* check if tlv type is known */
+ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+ !p_vf->b_malicious) {
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+ qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_UPDATE:
+ qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_COALESCE_READ:
+ qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_BULLETIN_UPDATE_MAC:
+ qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+ p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_MALICIOUS);
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_NOTICE(p_hwfn,
+ "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding, mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address)) {
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%02x]: Can't respond to TLV - no valid reply address\n",
+ p_vf->abs_vf_id);
+ }
+ }
+}
+
+static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
+{
+ int i;
+
+ memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+
+ qed_for_each_vf(p_hwfn, i) {
+ struct qed_vf_info *p_vf;
+
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+ if (p_vf->vf_mbx.b_pending_msg)
+ events[i / 64] |= 1ULL << (i % 64);
+ }
+}
+
+static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid)
+{
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+
+ if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n",
+ abs_vfid);
+ return NULL;
+ }
+
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid, struct regpair *vf_msg)
+{
+ struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+ abs_vfid);
+
+ if (!p_vf)
+ return 0;
+
+ /* Record the physical address of the request so that the handler
+ * can later copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo);
+
+ /* Mark the event and schedule the workqueue */
+ p_vf->vf_mbx.b_pending_msg = true;
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+ return 0;
+}
+
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id
+ (p_data->entity_id));
+ if (!p_vf)
+ return;
+
+ if (!p_vf->b_malicious) {
+ DP_NOTICE(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+
+ p_vf->b_malicious = true;
+ } else {
+ DP_INFO(p_hwfn,
+ "VF [%d] - Malicious behavior [%02x]\n",
+ p_vf->abs_vf_id, p_data->err_id);
+ }
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+ &data->vf_pf_channel.msg_addr);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return -EINVAL;
+ }
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (qed_iov_is_valid_vfid(p_hwfn, i, true, false))
+ return i;
+
+out:
+ return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+ int vfid)
+{
+ struct qed_dmae_params params;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return -EINVAL;
+
+ memset(&params, 0, sizeof(params));
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
+ SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
+ params.src_vfid = vf_info->abs_vf_id;
+
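+ /* DMA the request from the address the VF posted in the EQ event
+ * into the PF's local mailbox buffer; the length is given in
+ * 32-bit words, hence the division by 4.
+ */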
+ if (qed_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can't set forced MAC to malicious VF [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->p_vf_info.is_trusted_configured) {
+ feature = BIT(VFPF_BULLETIN_MAC_ADDR);
+ /* Trust mode will disable Forced MAC */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~BIT(MAC_ADDR_FORCED);
+ } else {
+ feature = BIT(MAC_ADDR_FORCED);
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~BIT(VFPF_BULLETIN_MAC_ADDR);
+ }
+
+ memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n",
+ vfid);
+ return -EINVAL;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF [%d]\n",
+ vfid);
+ return -EINVAL;
+ }
+
+ if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set MAC, Forced MAC is configured\n");
+ return -EINVAL;
+ }
+
+ feature = BIT(VFPF_BULLETIN_MAC_ADDR);
+ ether_addr_copy(vf_info->bulletin.p_virt->mac, mac);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+
+ if (vf_info->p_vf_info.is_trusted_configured)
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+
+ return 0;
+}
+
+static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can't set forced vlan to malicious VF [%d]\n", vfid);
+ return;
+ }
+
+ feature = BIT(VLAN_ADDR_FORCED);
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set udp ports, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ if (vf_info->b_malicious) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Can not set udp ports to malicious VF [%d]\n",
+ vfid);
+ return;
+ }
+
+ vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+ vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+ struct qed_vf_info *vf;
+ int rc = -EINVAL;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = 0;
+ goto out;
+ }
+
+ rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap &
+ BIT(VFPF_BULLETIN_MAC_ADDR)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+static u16
+qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid, int val)
+{
+ struct qed_vf_info *vf;
+ u8 abs_vp_id = 0;
+ u16 rl_id;
+ int rc;
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc)
+ return rc;
+
+ rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+ QM_RL_TYPE_NORMAL);
+}
+
+static int
+qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+ struct qed_vf_info *vf;
+ u8 vport_id;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return -EINVAL;
+ }
+ }
+
+ vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ vport_id = vf->vport_id;
+
+ return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_wfq_data *vf_vp_wfq;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+ /* Memory barrier for setting atomic bit */
+ smp_mb__before_atomic();
+ set_bit(flag, &hwfn->iov_task_flags);
+ /* Memory barrier after setting atomic bit */
+ smp_mb__after_atomic();
+ DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, i)
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+
+ /* Mark VFs for disablement */
+ qed_iov_set_vfs_to_disable(cdev, true);
+
+ if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+ pci_disable_sriov(cdev->pdev);
+
+ if (cdev->recov_in_prog) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Skip SRIOV disable operations in the device since a recovery is in progress\n");
+ goto out;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ /* Failure to acquire the ptt in 100g creates an odd error
+	 * where the first engine has already released IOV.
+ */
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Clean WFQ db and configure equal weight for all vports */
+ qed_clean_wfq_db(hwfn, ptt);
+
+ qed_for_each_vf(hwfn, j) {
+ int k;
+
+ if (!qed_iov_is_valid_vfid(hwfn, j, true, false))
+ continue;
+
+			/* Wait up to ~2 seconds for the VF to be disabled before releasing */
+ for (k = 0; k < 100; k++) {
+ if (!qed_iov_is_vf_stopped(hwfn, j))
+ msleep(20);
+ else
+ break;
+ }
+
+ if (k < 100)
+ qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+ ptt, j);
+ else
+ DP_ERR(hwfn,
+ "Timeout waiting for VF's FLR to end\n");
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+out:
+ qed_iov_set_vfs_to_disable(cdev, false);
+
+ return 0;
+}
+
+static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
+ u16 vfid,
+ struct qed_iov_vf_init_params *params)
+{
+ u16 base, i;
+
+ /* Since we have an equal resource distribution per-VF, and we assume
+	 * the PF has acquired the first QED_PF_L2_QUE queues, we start setting
+ * sequentially from there.
+ */
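+	/* For example (illustrative values only): if the PF owns the first 16
+	 * L2 queues and each VF is given 4 queues, VF 0 uses queues 16-19,
+	 * VF 1 uses queues 20-23, and so on.
+	 */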
+ base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues;
+
+ params->rel_vf_id = vfid;
+ for (i = 0; i < params->num_queues; i++) {
+ params->req_rx_queue[i] = base + i;
+ params->req_tx_queue[i] = base + i;
+ }
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+ struct qed_iov_vf_init_params params;
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int i, j, rc;
+
+ if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+ DP_NOTICE(cdev, "Can start at most %d VFs\n",
+ RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+ return -EINVAL;
+ }
+
+ memset(&params, 0, sizeof(params));
+
+ /* Initialize HW for VF access */
+ for_each_hwfn(cdev, j) {
+ hwfn = &cdev->hwfns[j];
+ ptt = qed_ptt_acquire(hwfn);
+
+ /* Make sure not to use more than 16 queues per VF */
+ params.num_queues = min_t(int,
+ FEAT_NUM(hwfn, QED_VF_L2_QUE) / num,
+ 16);
+
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (!qed_iov_is_valid_vfid(hwfn, i, false, true))
+ continue;
+
+ qed_sriov_enable_qid_config(hwfn, i, &params);
+ rc = qed_iov_init_hw_for_vf(hwfn, ptt, &params);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+ qed_ptt_release(hwfn, ptt);
+ goto err;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ /* Enable SRIOV PCIe functions */
+ rc = pci_enable_sriov(cdev->pdev, num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+ goto err;
+ }
+
+ hwfn = QED_LEADING_HWFN(cdev);
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+ if (rc)
+ DP_INFO(cdev, "Failed to update eswitch mode\n");
+ qed_ptt_release(hwfn, ptt);
+
+ return num;
+
+err:
+ qed_sriov_disable(cdev, false);
+ return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+ if (!IS_QED_SRIOV(cdev)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs_param)
+ return qed_sriov_enable(cdev, num_vfs_param);
+ else
+ return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the MAC, and schedule the IOV task */
+ if (vf_info->is_trusted_configured)
+ ether_addr_copy(vf_info->mac, mac);
+ else
+ ether_addr_copy(vf_info->forced_mac, mac);
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced vlan, and schedule the IOV task */
+ vf_info->forced_vlan = vid;
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_public_vf_info *vf_info;
+ struct qed_mcp_link_state link;
+ u32 tx_rate;
+ int ret;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+ ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+ if (ret)
+ return ret;
+
+ /* Fill information about VF */
+ ivi->vf = vf_id;
+
+ if (is_valid_ether_addr(vf_info->forced_mac))
+ ether_addr_copy(ivi->mac, vf_info->forced_mac);
+ else
+ ether_addr_copy(ivi->mac, vf_info->mac);
+
+ ivi->vlan = vf_info->forced_vlan;
+ ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+ ivi->linkstate = vf_info->link_state;
+ tx_rate = vf_info->tx_rate;
+ ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+ ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+ ivi->trusted = vf_info->is_trusted_request;
+
+ return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+ struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev);
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ int i;
+
+ if (!hwfn->pf_iov_info)
+ return;
+
+ /* Update bulletin of all future possible VFs with link configuration */
+ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+ if (!vf_info)
+ continue;
+
+ /* Only hwfn0 is actually interested in the link speed.
+ * But since only it would receive an MFW indication of link,
+ * need to take configuration from it - otherwise things like
+ * rate limiting for hwfn1 VF would not work.
+ */
+ memcpy(&params, qed_mcp_get_link_params(lead_hwfn),
+ sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn),
+ sizeof(caps));
+
+ /* Modify link according to the VF's configured link state */
+ switch (vf_info->link_state) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link.link_up = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link.link_up = true;
+ /* Set speed according to maximum supported by HW.
+ * that is 40G for regular devices and 100G for CMT
+ * mode devices.
+ */
+ link.speed = (hwfn->cdev->num_hwfns > 1) ?
+ 100000 : 40000;
+ break;
+ default:
+ /* In auto mode pass PF link image to VF */
+ break;
+ }
+
+ if (link.link_up && vf_info->tx_rate) {
+ struct qed_ptt *ptt;
+ int rate;
+
+ rate = min_t(int, vf_info->tx_rate, link.speed);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+ return;
+ }
+
+ if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+ vf_info->tx_rate = rate;
+ link.speed = rate;
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_link(hwfn, i, &params, &link, &caps);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+ int vf_id, int link_state)
+{
+ int i;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* Handle configuration of link state */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ if (!vf)
+ continue;
+
+ if (vf->link_state == link_state)
+ continue;
+
+ vf->link_state = link_state;
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+ }
+
+ return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+ int i, rc = -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set tx rate\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+
+ vf->tx_rate = rate;
+
+ qed_inform_vf_link_state(p_hwfn);
+ }
+
+ return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+ int vfid, u32 min_rate, u32 max_rate)
+{
+ int rc_min = 0, rc_max = 0;
+
+ if (max_rate)
+ rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+ if (min_rate)
+ rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+ if (rc_max | rc_min)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(hwfn, vfid)) {
+ DP_NOTICE(hwfn,
+ "SR-IOV sanity check failed, can't set trust\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+
+ if (vf->is_trusted_request == trust)
+ return 0;
+ vf->is_trusted_request = trust;
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG);
+ }
+
+ return 0;
+}
+
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+ u64 events[QED_VF_ARRAY_LENGTH];
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Can't acquire PTT; re-scheduling\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+ return;
+ }
+
+ qed_iov_pf_get_pending_events(hwfn, events);
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+ events[0], events[1], events[2]);
+
+ qed_for_each_vf(hwfn, i) {
+ /* Skip VFs with no pending messages */
+ if (!(events[i / 64] & (1ULL << (i % 64))))
+ continue;
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+ i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Copy VF's message to PF's request buffer for that VF */
+ if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+ continue;
+
+ qed_iov_process_mbx_req(hwfn, ptt, i);
+ }
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn,
+ u8 *mac,
+ struct qed_public_vf_info *info)
+{
+ if (info->is_trusted_configured) {
+ if (is_valid_ether_addr(info->mac) &&
+ (!mac || !ether_addr_equal(mac, info->mac)))
+ return true;
+ } else {
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac)))
+ return true;
+ }
+
+ return false;
+}
+
+static void qed_set_bulletin_mac(struct qed_hwfn *hwfn,
+ struct qed_public_vf_info *info,
+ int vfid)
+{
+ if (info->is_trusted_configured)
+ qed_iov_bulletin_set_mac(hwfn, info->mac, vfid);
+ else
+ qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid);
+}
+
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+ int i;
+
+ qed_for_each_vf(hwfn, i) {
+ struct qed_public_vf_info *info;
+ bool update = false;
+ u8 *mac;
+
+ info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (!info)
+ continue;
+
+ /* Update data on bulletin board */
+ if (info->is_trusted_configured)
+ mac = qed_iov_bulletin_get_mac(hwfn, i);
+ else
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+
+ if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Update bulletin board with MAC */
+ qed_set_bulletin_mac(hwfn, info, i);
+ update = true;
+ }
+
+ if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+ info->forced_vlan) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+ info->forced_vlan,
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ qed_iov_bulletin_set_forced_vlan(hwfn,
+ info->forced_vlan, i);
+ update = true;
+ }
+
+ if (update)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ return;
+ }
+
+ qed_for_each_vf(hwfn, i)
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id)
+{
+ struct qed_public_vf_info *vf_info;
+ struct qed_vf_info *vf;
+ u8 *force_mac;
+ int i;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ vf = qed_iov_get_vf_info(hwfn, vf_id, true);
+
+ if (!vf_info || !vf)
+ return;
+
+	/* Convert the forced MAC into a regular MAC when VF trust is turned on */
+ if (vf_info->is_trusted_configured &&
+ (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) {
+ force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id);
+
+ if (force_mac) {
+ /* Clear existing shadow copy of MAC to have a clean
+ * slate.
+ */
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(vf->shadow_config.macs[i],
+ vf_info->mac)) {
+ eth_zero_addr(vf->shadow_config.macs[i]);
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n",
+ vf_info->mac, vf_id);
+ break;
+ }
+ }
+
+ ether_addr_copy(vf_info->mac, force_mac);
+ eth_zero_addr(vf_info->forced_mac);
+ vf->bulletin.p_virt->valid_bitmap &=
+ ~BIT(MAC_ADDR_FORCED);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+ }
+
+ /* Update shadow copy with VF MAC when trust mode is turned off */
+ if (!vf_info->is_trusted_configured) {
+ u8 empty_mac[ETH_ALEN];
+
+ eth_zero_addr(empty_mac);
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(vf->shadow_config.macs[i],
+ empty_mac)) {
+ ether_addr_copy(vf->shadow_config.macs[i],
+ vf_info->mac);
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n",
+ vf_info->mac, vf_id);
+ break;
+ }
+ }
+ /* Clear bulletin when trust mode is turned off,
+ * to have a clean slate for next (normal) operations.
+ */
+ qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
+static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
+{
+ struct qed_sp_vport_update_params params;
+ struct qed_filter_accept_flags *flags;
+ struct qed_public_vf_info *vf_info;
+ struct qed_vf_info *vf;
+ u8 mask;
+ int i;
+
+ mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED;
+ flags = &params.accept_flags;
+
+ qed_for_each_vf(hwfn, i) {
+		/* Make sure the currently requested configuration didn't
+		 * flip, so that we don't end up configuring something
+		 * that isn't needed.
+ */
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (vf_info->is_trusted_configured ==
+ vf_info->is_trusted_request)
+ continue;
+ vf_info->is_trusted_configured = vf_info->is_trusted_request;
+
+ /* Handle forced MAC mode */
+ qed_update_mac_for_vf_trust_change(hwfn, i);
+
+ /* Validate that the VF has a configured vport */
+ vf = qed_iov_get_vf_info(hwfn, i, true);
+ if (!vf || !vf->vport_instance)
+ continue;
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+
+ params.update_ctl_frame_check = 1;
+ params.mac_chk_en = !vf_info->is_trusted_configured;
+ params.update_accept_any_vlan_flg = 0;
+
+ if (vf_info->accept_any_vlan && vf_info->forced_vlan) {
+ params.update_accept_any_vlan_flg = 1;
+ params.accept_any_vlan = vf_info->accept_any_vlan;
+ }
+
+ if (vf_info->rx_accept_mode & mask) {
+ flags->update_rx_mode_config = 1;
+ flags->rx_accept_filter = vf_info->rx_accept_mode;
+ }
+
+ if (vf_info->tx_accept_mode & mask) {
+ flags->update_tx_mode_config = 1;
+ flags->tx_accept_filter = vf_info->tx_accept_mode;
+ }
+
+ /* Remove if needed; Otherwise this would set the mask */
+ if (!vf_info->is_trusted_configured) {
+ flags->rx_accept_filter &= ~mask;
+ flags->tx_accept_filter &= ~mask;
+ params.accept_any_vlan = false;
+ }
+
+ if (flags->update_rx_mode_config ||
+ flags->update_tx_mode_config ||
+ params.update_ctl_frame_check ||
+ params.update_accept_any_vlan_flg) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "vport update config for %s VF[abs 0x%x rel 0x%x]\n",
+ vf_info->is_trusted_configured ? "trusted" : "untrusted",
+ vf->abs_vf_id, vf->relative_vf_id);
+ qed_sp_vport_update(hwfn, &params,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ }
+ }
+}
+
+static void qed_iov_pf_task(struct work_struct *work)
+
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ int rc;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+ return;
+ }
+
+ rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+ if (rc)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+ qed_handle_vf_msg(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_pf_set_vf_unicast(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_bulletin_post(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags))
+ qed_iov_handle_trust_change(hwfn);
+}
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].iov_wq)
+ continue;
+
+ if (schedule_first) {
+ qed_schedule_iov(&cdev->hwfns[i],
+ QED_IOV_WQ_STOP_WQ_FLAG);
+ cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+ }
+
+ destroy_workqueue(cdev->hwfns[i].iov_wq);
+ }
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ char name[NAME_SIZE];
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		/* PFs need a dedicated workqueue only if they support IOV.
+ * VFs always require one.
+ */
+ if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+ continue;
+
+ snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+ p_hwfn->iov_wq = create_singlethread_workqueue(name);
+ if (!p_hwfn->iov_wq) {
+ DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+ return -ENOMEM;
+ }
+
+ if (IS_PF(cdev))
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+ else
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+ }
+
+ return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+ .configure = &qed_sriov_configure,
+ .set_mac = &qed_sriov_pf_set_mac,
+ .set_vlan = &qed_sriov_pf_set_vlan,
+ .get_config = &qed_get_vf_config,
+ .set_link_state = &qed_set_vf_link_state,
+ .set_spoof = &qed_spoof_configure,
+ .set_rate = &qed_set_vf_rate,
+ .set_trust = &qed_set_vf_trust,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644
index 000000000..6ee2493de
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -0,0 +1,501 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+
+#define QED_ETH_VF_NUM_MAC_FILTERS 1
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
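+/* Number of u64 words used for per-VF bitmaps (one bit per VF), e.g. pending
+ * VF events and pending FLRs.
+ */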
+#define QED_VF_ARRAY_LENGTH (3)
+
+#ifdef CONFIG_QED_SRIOV
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+ QED_IOV_VP_UPDATE_ACTIVATE,
+ QED_IOV_VP_UPDATE_VLAN_STRIP,
+ QED_IOV_VP_UPDATE_TX_SWITCH,
+ QED_IOV_VP_UPDATE_MCAST,
+ QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+ QED_IOV_VP_UPDATE_RSS,
+ QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+ QED_IOV_VP_UPDATE_SGE_TPA,
+ QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+ u8 mac[ETH_ALEN];
+
+ /* IFLA_VF_LINK_STATE_<X> */
+ int link_state;
+
+	/* Currently configured Tx rate in Mb/s. 0 if unconfigured */
+ int tx_rate;
+
+ /* Trusted VFs can configure promiscuous mode.
+ * Also store shadow promisc configuration if needed.
+ */
+ bool is_trusted_configured;
+ bool is_trusted_request;
+ u8 rx_accept_mode;
+ u8 tx_accept_mode;
+ bool accept_any_vlan;
+};
+
+struct qed_iov_vf_init_params {
+ u16 rel_vf_id;
+
+	/* Number of requested queues; currently we don't support different
+	 * numbers of Rx and Tx queues.
+ */
+
+ u16 num_queues;
+
+ /* Allow the client to choose which qzones to use for Rx/Tx,
+ * and which queue_base to use for Tx queues on a per-queue basis.
+ * Notice values should be relative to the PF resources.
+ */
+ u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
+ u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF and contains all information
+ * required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ /* Message from VF awaits handling */
+ bool b_pending_msg;
+
+ u8 *offset;
+
+ /* saved VF request header */
+ struct vfpf_first_tlv first_tlv;
+};
+
+#define QED_IOV_LEGACY_QID_RX (0)
+#define QED_IOV_LEGACY_QID_TX (1)
+#define QED_IOV_QID_INVALID (0xFE)
+
+struct qed_vf_queue_cid {
+ bool b_is_tx;
+ struct qed_queue_cid *p_cid;
+};
+
+/* Describes a qzone associated with the VF */
+struct qed_vf_queue {
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+
+ struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE];
+};
+
+enum vf_state {
+	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
+ VF_ACQUIRED, /* VF, acquired, but not initialized */
+ VF_ENABLED, /* VF, Enabled */
+ VF_RESET, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED /* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct qed_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+ struct qed_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ bool b_malicious;
+ u8 to_disable;
+
+ struct qed_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
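+/* Absolute VF ID as seen by HW: VFs on the second engine (path 1) are offset
+ * by MAX_NUM_VFS_BB.
+ */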
+#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance;
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u16 rx_coal;
+ u16 tx_coal;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+
+ struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+ u8 num_active_rxqs;
+ struct qed_public_vf_info p_vf_info;
+ bool spoof_chk;
+ bool req_spoofchk_val;
+
+ /* Stores the configuration requested by VF */
+ struct qed_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+ struct qed_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+	/* Mailbox messages are allocated contiguously and split among the VFs */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
+
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+ QED_IOV_WQ_TRUST_FLAG,
+ QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
+};
+
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * qed_iov_is_valid_vfid(): Check if the given VF ID @rel_vf_id is valid
+ *                          w.r.t. @b_enabled_only:
+ *                          if b_enabled_only = true, only an enabled
+ *                          VF ID is valid;
+ *                          otherwise any VF ID less than max_vfs is valid.
+ *
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: Relative VF ID.
+ * @b_enabled_only: consider only enabled VF.
+ * @b_non_malicious: true iff we want to validate vf isn't malicious.
+ *
+ * Return: bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id,
+ bool b_enabled_only, bool b_non_malicious);
+
+/**
+ * qed_iov_get_next_active_vf(): Given a VF index, return index of
+ * next [including that] active VF.
+ *
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: VF ID.
+ *
+ * Return: MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
+ int vfid, u16 vxlan_port, u16 geneve_port);
+
+/**
+ * qed_iov_hw_info(): Read SR-IOV related information and allocate
+ * resources; reads from configuration space, shmem, etc.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_add_tlv(): place a given tlv on the tlv buffer at next offset
+ *
+ * @p_hwfn: HW device data.
+ * @offset: offset.
+ * @type: Type
+ * @length: Length.
+ *
+ * Return: pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+/**
+ * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer
+ *
+ * @p_hwfn: HW device data.
+ * @tlvs_list: Tlvs_list.
+ *
+ * Return: Void.
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_data: Pointer to data.
+ *
+ * Return: Void.
+ */
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data);
+
+/**
+ * qed_sriov_eqe_event(): Callback for SRIOV events.
+ *
+ * @p_hwfn: HW device data.
+ * @opcode: Opcode.
+ * @echo: Echo.
+ * @data: data
+ * @fw_return_code: FW return code.
+ *
+ * Return: Int.
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
+/**
+ * qed_iov_alloc(): allocate sriov related resources
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_iov_setup(): setup sriov related resources
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_iov_free(): free sriov related resources
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_iov_free_hw_info(): free sriov related memory that was
+ * allocated during hw_prepare
+ *
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed.
+ *
+ * @p_hwfn: HW device data.
+ * @disabled_vfs: bitmask of all VFs on path that were FLRed
+ *
+ * Return: true if at least one of the PF's VFs was FLR-ed, false otherwise.
+ */
+bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_tlvs_list: Pointer to tlvs list
+ * @req_type: Type of TLV
+ *
+ * Return: pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+ return false;
+}
+
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return MAX_NUM_VFS;
+}
+
+static inline void
+qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid,
+ u16 vxlan_port, u16 geneve_port)
+{
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
+{
+ return false;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+ enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+
+static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ return 0;
+}
+#endif
+
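+/* Iterate over all currently active VFs of @_p_hwfn; @_i receives the
+ * relative VF index.
+ */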
+#define qed_for_each_vf(_p_hwfn, _i) \
+ for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS; \
+ _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644
index 000000000..7b0e390c0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -0,0 +1,1718 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+	/* This mutex is acquired here and released in qed_vf_pf_req_end(),
+	 * once the PF's response has been processed.
+	 * So, qed_vf_pf_prep() and qed_vf_pf_req_end()
+	 * must always come in pairs.
+	 */
+ mutex_lock(&(p_iov->mutex));
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "preparing to send 0x%04x tlv over vf pf channel\n",
+ type);
+
+ /* Reset Request offset */
+ p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+ /* Clear mailbox - both request and reply */
+ memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
+
+static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status)
+{
+ union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF request status = 0x%x, PF reply status = 0x%x\n",
+ req_status, resp->default_resp.hdr.status);
+
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+}
+
+#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90
+#define QED_VF_CHANNEL_USLEEP_DELAY 100
+#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10
+#define QED_VF_CHANNEL_MSLEEP_DELAY 25
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ int iter, rc = 0;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ qed_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ wmb();
+
+ REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+	/* When the PF is done with the response, it writes back to the
+	 * `done' address in a coherent DMA zone. Poll until then.
+ */
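+	/* Overall wait budget: ~9 ms of busy-waiting (90 x 100 us) followed by
+	 * up to ~250 ms of sleeping waits (10 x 25 ms).
+	 */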
+
+ iter = QED_VF_CHANNEL_USLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ udelay(QED_VF_CHANNEL_USLEEP_DELAY);
+ dma_rmb();
+ }
+
+ iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS;
+ while (!*done && iter--) {
+ msleep(QED_VF_CHANNEL_MSLEEP_DELAY);
+ dma_rmb();
+ }
+
+ if (!*done) {
+ DP_NOTICE(p_hwfn,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = -EBUSY;
+ } else {
+ if ((*done != PFVF_STATUS_SUCCESS) &&
+ (*done != PFVF_STATUS_NO_RESOURCE))
+ DP_NOTICE(p_hwfn,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ else
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+ return rc;
+}
+
+static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_qid_tlv *p_qid_tlv;
+
+ /* Only add QIDs for the queue if it was negotiated with PF */
+ if (!(p_iov->acquire_resp.pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ return;
+
+ p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ p_qid_tlv->qid = p_cid->qid_usage_idx;
+}
+
+static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ qed_vf_pf_req_end(p_hwfn, rc);
+ if (!b_final)
+ return rc;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return _qed_vf_pf_release(p_hwfn, true);
+}
+
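+/* Maximum number of acquisition attempts; also bounds the number of retries
+ * on a VF-PF channel timeout and on PF-suggested resource reductions.
+ */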
+#define VF_ACQUIRE_THRESH 3
+static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. Try PF recommended amount\n",
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+		   p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters,
+ p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+ p_req->num_cids = p_resp->num_cids;
+}
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vf_pf_resc_request *p_resc;
+ u8 retry_cnt = VF_ACQUIRE_THRESH;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
+
+	/* start filling the request */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS;
+
+ req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* If we've mapped the doorbell bar, try using queue qids */
+ if (p_iov->b_doorbell_bar) {
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR |
+ VFPF_ACQUIRE_CAP_QUEUE_QIDS;
+ p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS;
+ }
+
+	/* PF to VF bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV, "attempting to acquire resources\n");
+
+ /* Clear response buffer, as this might be a re-send */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* send acquire request */
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ /* Re-try acquire in case of vf-pf hw channel timeout */
+ if (retry_cnt && rc == -EBUSY) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF retrying to acquire due to VPC timeout\n");
+ retry_cnt--;
+ continue;
+ }
+
+ if (rc)
+ goto exit;
+
+ /* copy acquire response from buffer to p_hwfn */
+ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ /* It's possible legacy PF mistakenly accepted;
+ * but we don't care - simply mark it as
+ * legacy and continue.
+ */
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+ resources_acquired = true;
+ } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
+ } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) {
+ if (pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR,
+ pfdev_info->major_fp_hsi);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (!pfdev_info->major_fp_hsi) {
+ if (req->vfdev_info.capabilities &
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+ DP_NOTICE(p_hwfn,
+ "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n");
+ rc = -EINVAL;
+ goto exit;
+ } else {
+ DP_INFO(p_hwfn,
+ "PF is old - try re-acquire to see if it supports FW-version override\n");
+ req->vfdev_info.capabilities |=
+ VFPF_ACQUIRE_CAP_PRE_FP_HSI;
+ continue;
+ }
+ }
+
+ /* If PF/VF are using same Major, PF must have had
+			 * its reasons. Simply fail.
+ */
+ DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n");
+ rc = -EINVAL;
+ goto exit;
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned error %d to VF acquisition request\n",
+ resp->hdr.status);
+ rc = -EAGAIN;
+ goto exit;
+ }
+ }
+
+ /* Mark the PF as legacy, if needed */
+ if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI)
+ p_iov->b_pre_fp_hsi = true;
+
+ /* In case PF doesn't support multi-queue Tx, update the number of
+ * CIDs to reflect the number of queues [older PFs didn't fill that
+ * field].
+ */
+ if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS))
+ resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs;
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+ p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+ p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_NOTICE(p_hwfn, "100g VF\n");
+ p_hwfn->cdev->num_hwfns = 2;
+ }
+ }
+
+ if (!p_iov->b_pre_fp_hsi &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI; %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id)
+{
+ u32 bar_size;
+
+ /* Regview size is fixed */
+ if (bar_id == BAR_ID_0)
+ return 1 << 17;
+
+ /* Doorbell is received from PF */
+ bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size;
+ if (bar_size)
+ return 1 << bar_size;
+ return 0;
+}
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev);
+ struct qed_vf_iov *p_iov;
+ u32 reg;
+ int rc;
+
+ /* Set number of hwfns - might be overridden once leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->num_hwfns = 1;
+
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+ if (!p_iov)
+ return -ENOMEM;
+
+	/* Doorbells are tricky; Upper-layer has already set the hwfn doorbell
+	 * value, but there are several incompatibility scenarios where that
+ * would be incorrect and we'd need to override it.
+ */
+ if (!p_hwfn->doorbells) {
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ } else if (p_hwfn == p_lead) {
+ /* For leading hw-function, value is always correct, but need
+ * to handle scenario where legacy PF would not support 100g
+ * mapped bars later.
+ */
+ p_iov->b_doorbell_bar = true;
+ } else {
+ /* here, value would be correct ONLY if the leading hwfn
+ * received indication that mapped-bars are supported.
+ */
+ if (p_lead->vf_iov_info->b_doorbell_bar)
+ p_iov->b_doorbell_bar = true;
+ else
+ p_hwfn->doorbells = (u8 __iomem *)
+ p_hwfn->regview + PXP_VF_BAR0_START_DQ;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ &p_iov->vf2pf_request_phys,
+ GFP_KERNEL);
+ if (!p_iov->vf2pf_request)
+ goto free_p_iov;
+
+ p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ &p_iov->pf2vf_reply_phys,
+ GFP_KERNEL);
+ if (!p_iov->pf2vf_reply)
+ goto free_vf2pf_request;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+ p_iov->vf2pf_request,
+ (u64)p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+ p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov->bulletin.size,
+ &p_iov->bulletin.phys,
+ GFP_KERNEL);
+ if (!p_iov->bulletin.p_virt)
+ goto free_pf2vf_reply;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt,
+ (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+ mutex_init(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+ rc = qed_vf_pf_acquire(p_hwfn);
+
+ /* If VF is 100g using a mapped bar and PF is too old to support that,
+	 * acquisition would succeed - but the VF would have no way of knowing
+ * the size of the doorbell bar configured in HW and thus will not
+ * know how to split it for 2nd hw-function.
+ * In this case we re-try without the indication of the mapped
+ * doorbell.
+ */
+ if (!rc && p_iov->b_doorbell_bar &&
+ !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) &&
+ (p_hwfn->cdev->num_hwfns > 1)) {
+ rc = _qed_vf_pf_release(p_hwfn, false);
+ if (rc)
+ return rc;
+
+ p_iov->b_doorbell_bar = false;
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+ rc = qed_vf_pf_acquire(p_hwfn);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n",
+ p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells);
+
+ return rc;
+
+free_pf2vf_reply:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+free_vf2pf_request:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+ kfree(p_iov);
+
+ return -ENOMEM;
+}
+
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+static void
+__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_mode mask, u8 *p_cls)
+{
+ if (p_src->b_update_mode) {
+ p_req->tun_mode_update_mask |= BIT(mask);
+
+ if (p_src->b_mode_enabled)
+ p_req->tunn_mode |= BIT(mask);
+ }
+
+ *p_cls = p_src->tun_cls;
+}
+
+static void
+qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+ struct qed_tunn_update_type *p_src,
+ enum qed_tunn_mode mask,
+ u8 *p_cls, struct qed_tunn_update_udp_port *p_port,
+ u8 *p_update_port, u16 *p_udp_port)
+{
+ if (p_port->b_update_port) {
+ *p_update_port = 1;
+ *p_udp_port = p_port->port;
+ }
+
+ __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+ if (p_tun->vxlan.b_mode_enabled)
+ p_tun->vxlan.b_update_mode = true;
+ if (p_tun->l2_geneve.b_mode_enabled)
+ p_tun->l2_geneve.b_update_mode = true;
+ if (p_tun->ip_geneve.b_mode_enabled)
+ p_tun->ip_geneve.b_update_mode = true;
+ if (p_tun->l2_gre.b_mode_enabled)
+ p_tun->l2_gre.b_update_mode = true;
+ if (p_tun->ip_gre.b_mode_enabled)
+ p_tun->ip_gre.b_update_mode = true;
+
+ p_tun->b_update_rx_cls = true;
+ p_tun->b_update_tx_cls = true;
+}
+
+static void
+__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun,
+ u16 feature_mask, u8 tunn_mode,
+ u8 tunn_cls, enum qed_tunn_mode val)
+{
+ if (feature_mask & BIT(val)) {
+ p_tun->b_mode_enabled = tunn_mode;
+ p_tun->tun_cls = tunn_cls;
+ } else {
+ p_tun->b_mode_enabled = false;
+ }
+}
+
+static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tun,
+ struct pfvf_update_tunn_param_tlv *p_resp)
+{
+ /* Update mode and classes provided by PF */
+ u16 feat_mask = p_resp->tunn_feature_mask;
+
+ __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+ p_resp->vxlan_mode, p_resp->vxlan_clss,
+ QED_MODE_VXLAN_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+ p_resp->l2geneve_mode,
+ p_resp->l2geneve_clss,
+ QED_MODE_L2GENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+ p_resp->ipgeneve_mode,
+ p_resp->ipgeneve_clss,
+ QED_MODE_IPGENEVE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+ p_resp->l2gre_mode, p_resp->l2gre_clss,
+ QED_MODE_L2GRE_TUNN);
+ __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+ p_resp->ipgre_mode, p_resp->ipgre_clss,
+ QED_MODE_IPGRE_TUNN);
+ p_tun->geneve_port.port = p_resp->geneve_udp_port;
+ p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+ p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+ p_tun->ip_geneve.b_mode_enabled,
+ p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled);
+}
+
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_src)
+{
+ struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel;
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_update_tunn_param_tlv *p_resp;
+ struct vfpf_update_tunn_param_tlv *p_req;
+ int rc;
+
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ sizeof(*p_req));
+
+ if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+ p_req->update_tun_cls = 1;
+
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN,
+ &p_req->vxlan_clss, &p_src->vxlan_port,
+ &p_req->update_vxlan_port,
+ &p_req->vxlan_port);
+ qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+ QED_MODE_L2GENEVE_TUNN,
+ &p_req->l2geneve_clss, &p_src->geneve_port,
+ &p_req->update_geneve_port,
+ &p_req->geneve_port);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+ QED_MODE_IPGENEVE_TUNN,
+ &p_req->ipgeneve_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+ QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+ __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+ QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+ if (rc)
+ goto exit;
+
+ if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to update tunnel parameters\n");
+ rc = -EINVAL;
+ }
+
+ qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+int
+qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ u8 rx_qid = p_cid->rel.queue_id;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1;
+
+ /* If PF is legacy, we'll need to calculate producers ourselves
+ * as well as clean them.
+ */
+ if (p_iov->b_pre_fp_hsi) {
+ u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE;
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)(&init_prod_val));
+ }
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* Learn the address of the producer from the response */
+ if (!p_iov->b_pre_fp_hsi) {
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)&init_prod_val);
+ }
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid, bool cqe_completion)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = p_cid->rel.queue_id;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_txq_tlv *req;
+ u16 qid = p_cid->rel.queue_id;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = qid;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = p_cid->sb_igu_id;
+ req->sb_index = p_cid->sb_idx;
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ /* Modern PFs provide the actual offsets, while legacy
+ * provided only the queue id.
+ */
+ if (!p_iov->b_pre_fp_hsi) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+ } else {
+ u8 cid = p_iov->acquire_resp.resc.cid[qid];
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr_vf(cid,
+ DQ_DEMS_LEGACY);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n",
+ qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset);
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = p_cid->rel.queue_id;
+ req->num_txqs = 1;
+
+ qed_vf_pf_add_qid(p_hwfn, p_cid);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) {
+ struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i];
+
+ if (p_sb)
+ req->sb_addr[i] = p_sb->sb_phys;
+ }
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+ return !!p_data->update_tx_switching_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+ tlv);
+ return false;
+ }
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+ if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+ tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV[%d] Configuration %s\n",
+ tlv,
+ p_resp->hdr.status == PFVF_STATUS_SUCCESS ?
+ "succeeded" : "failed");
+ }
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ int rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ memcpy(p_mcast_tlv->bins, p_params->bins,
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct qed_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ int i, table_size;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = qed_add_tlv(p_hwfn,
+ &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+
+ table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE,
+ 1 << p_rss_tlv->rss_table_size_log);
+ for (i = 0; i < table_size; i++) {
+ struct qed_queue_cid *p_queue;
+
+ p_queue = rss_params->rss_ind_table[i];
+ p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id;
+ }
+ memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
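+
+/* Illustrative usage sketch (hypothetical caller, not part of this file):
+ * a caller that only wants to toggle the vport's Rx/Tx activation fills the
+ * matching update flags and lets qed_vf_pf_vport_update() build the
+ * activate TLV for it:
+ *
+ * struct qed_sp_vport_update_params params;
+ *
+ * memset(&params, 0, sizeof(params));
+ * params.update_vport_active_rx_flg = 1;
+ * params.vport_active_rx_flg = 1;
+ * params.update_vport_active_tx_flg = 1;
+ * params.vport_active_tx_flg = 1;
+ * rc = qed_vf_pf_vport_update(p_hwfn, &params);
+ */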
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
+
+ p_hwfn->b_int_enabled = 0;
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+ struct qed_sp_vport_update_params sp_params;
+ int i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
+ }
+ }
+
+ qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_ucast)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
+ memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EAGAIN;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ u16 *p_coal, struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_read_coal_resp_tlv *resp;
+ struct vfpf_read_coal_req_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep header tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req));
+ req->qid = p_cid->rel.queue_id;
+ req->is_rx = p_cid->b_is_rx ? 1 : 0;
+
+ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+ resp = &p_iov->pf2vf_reply->read_coal_resp;
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ *p_coal = resp->coal;
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+
+ return rc;
+}
+
+int
+qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ const u8 *p_mac)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_bulletin_update_mac_tlv *p_req;
+ struct pfvf_def_resp_tlv *p_resp;
+ int rc;
+
+ if (!p_mac)
+ return -EINVAL;
+
+ /* clear mailbox and prep header tlv */
+ p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ sizeof(*p_req));
+ ether_addr_copy(p_req->mac, p_mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requesting bulletin update for MAC[%pM]\n", p_mac);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ p_resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+int
+qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
+ u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_update_coalesce *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep header tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req));
+
+ req->rx_coal = rx_coal;
+ req->tx_coal = tx_coal;
+ req->qid = p_cid->rel.queue_id;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n",
+ rx_coal, tx_coal, req->qid);
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ goto exit;
+
+ if (rx_coal)
+ p_hwfn->cdev->rx_coalesce_usecs = rx_coal;
+
+ if (tx_coal)
+ p_hwfn->cdev->tx_coalesce_usecs = tx_coal;
+
+exit:
+ qed_vf_pf_req_end(p_hwfn, rc);
+ return rc;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+ return;
+ }
+
+ if (sb_id >= PFVF_MAX_SBS_PER_VF) {
+ DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id);
+ return;
+ }
+
+ p_iov->sbs_info[sb_id] = p_sb;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct qed_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return 0;
+
+ /* Verify the bulletin we see is valid */
+ crc = crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return -EAGAIN;
+
+ /* Set the shadow bulletin and process it */
+ memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return 0;
+}
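+
+/* Sketch of the matching publish side (an assumption - the PF code is not in
+ * this file): the writer is expected to compute the CRC over the same region,
+ * i.e. everything past the 'crc' field, before bumping 'version':
+ *
+ * p_bulletin->version++;
+ * p_bulletin->crc = crc32(0, (u8 *)p_bulletin + sizeof(p_bulletin->crc),
+ * bulletin_size - sizeof(p_bulletin->crc));
+ */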
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+ __qed_vf_get_link_params(p_hwfn, params,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+ __qed_vf_get_link_state(p_hwfn, link,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+ __qed_vf_get_link_caps(p_hwfn, p_link_caps,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+ *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs;
+}
+
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+ *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+ memcpy(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+ struct qed_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters)
+{
+ struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info;
+
+ *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid VF from changing a MAC enforced by PF */
+ if (ether_addr_equal(bulletin->mac, mac))
+ return false;
+
+ return false;
+}
+
+static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ ether_addr_copy(dst_mac, bulletin->mac);
+
+ return true;
+}
+
+static void
+qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn,
+ u16 *p_vxlan_port, u16 *p_geneve_port)
+{
+ struct qed_bulletin_content *p_bulletin;
+
+ p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+
+ *p_vxlan_port = p_bulletin->vxlan_udp_port;
+ *p_geneve_port = p_bulletin->geneve_udp_port;
+}
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+ struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+ u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+ void *cookie = hwfn->cdev->ops_cookie;
+ u16 vxlan_port, geneve_port;
+
+ qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port);
+ is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && cookie)
+ ops->force_mac(cookie, mac, !!is_mac_forced);
+
+ ops->ports_update(cookie, vxlan_port, geneve_port);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn, NULL);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ u8 change = 0;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ /* Handle bulletin board changes */
+ qed_vf_read_bulletin(hwfn, &change);
+ if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG,
+ &hwfn->iov_task_flags))
+ change = 1;
+ if (change)
+ qed_handle_bulletin_change(hwfn);
+
+ /* As VF is polling bulletin board, need to constantly re-schedule */
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644
index 000000000..2bd51a41c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -0,0 +1,1275 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 num_cids;
+ u8 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id;
+ u8 sb_qid;
+ u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE 1024
+
+enum {
+ PFVF_STATUS_WAITING,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+ PFVF_STATUS_MALICIOUS,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
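+
+/* For orientation (informal sketch): a vf->pf channel message is a TLV chain
+ * built inside the vf2pf_request mailbox:
+ *
+ * [ request TLV, starts with vfpf_first_tlv ]
+ * [ optional extended TLVs, e.g. vport-update or qid ]
+ * [ channel_list_end_tlv ]
+ *
+ * Each element starts with 'struct channel_tlv', so the chain can be walked
+ * by type/length; the first TLV also carries 'reply_address' for the
+ * pf->vf response.
+ */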
+
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */
+ /* A requirement for supporting multi-Tx queues on a single queue-zone:
+ * the VF would pass qids as additional information whenever passing
+ * queue references.
+ */
+#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2)
+
+ /* The VF is using the physical bar. While this is mostly internal
+ * to the VF, it might affect the number of CIDs supported, assuming
+ * QUEUE_QIDS is set.
+ */
+#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR BIT(3)
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
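+
+/* Example: rss_table_size_log = 7 describes a 2^7 = 128 entry indirection
+ * table, i.e. the full T_ETH_INDIRECTION_TABLE_SIZE; a smaller log uses only
+ * the leading entries of rss_ind_table[].
+ */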
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
+#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point and the new
+ * VFs would fail probe on the older PFs that fail to do so.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+
+ /* PF expects queues to be received with additional qids */
+#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's qed_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ /* Doorbell bar size configured in HW: log(size) or 0 */
+ u8 bar_size;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+
+ /* It's possible the PF had to configure an older fastpath HSI
+ * [in case the VF is newer than the PF]. This is communicated back
+ * to the VF. It can also be used, in case of an error due to
+ * non-matching versions, to give the VF insight into the failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
+ } pfdev_info;
+
+ struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 num_cids;
+ u8 padding;
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
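+
+/* The 'offset' above is interpreted against different BARs depending on the
+ * queue type: relative to the regview BAR for an Rx producer (see
+ * qed_vf_pf_rxq_start()) and relative to the doorbell BAR for a Tx doorbell
+ * (see qed_vf_pf_txq_start()).
+ */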
+
+/* Extended queue information - additional index for reference inside qzone.
+ * If communicated between VF/PF, each TLV relating to queues should be
+ * extended by one such [or have a future base TLV that already contains info].
+ */
+struct vfpf_qid_tlv {
+ struct channel_tlv tl;
+ u8 qid;
+ u8 padding[3];
+};
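+
+/* Sketch (mirrors the qed_vf_pf_add_qid() calls in qed_vf.c; the helper body
+ * shown here is an assumption): queue-related requests append one such TLV
+ * only when PFVF_ACQUIRE_CAP_QUEUE_QIDS was negotiated during ACQUIRE:
+ *
+ * p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ * CHANNEL_TLV_QID, sizeof(*p_qid_tlv));
+ * p_qid_tlv->qid = p_cid->qid_usage_idx;
+ */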
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+
+ /* this field is deprecated and should *always* be set to '1' */
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ /* There are only 256 approx bins, and in HSI they're divided into
+ * 32-bit values. As old VFs used to set bits in the values on their
+ * side, the upper half of the array is never expected to contain any
+ * data.
+ */
+ u64 bins[4];
+ u64 obsolete_bins[4];
+};
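+
+/* Setting a single approximate-match bin mirrors qed_vf_pf_filter_mcast()
+ * in qed_vf.c (sketch):
+ *
+ * bit = qed_mcast_bin_from_mac(mac);
+ * sp_params.bins[bit / 32] |= 1 << (bit % 32);
+ */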
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
+
+ u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 tun_mode_update_mask;
+ u8 tunn_mode;
+ u8 update_tun_cls;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u8 update_geneve_port;
+ u8 update_vxlan_port;
+ u16 geneve_port;
+ u16 vxlan_port;
+ u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+ struct pfvf_tlv hdr;
+
+ u16 tunn_feature_mask;
+ u8 vxlan_mode;
+ u8 l2geneve_mode;
+ u8 ipgeneve_mode;
+ u8 l2gre_mode;
+ u8 ipgre_mode;
+ u8 vxlan_clss;
+ u8 l2gre_clss;
+ u8 ipgre_clss;
+ u8 l2geneve_clss;
+ u8 ipgeneve_clss;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+struct vfpf_update_coalesce {
+ struct vfpf_first_tlv first_tlv;
+ u16 rx_coal;
+ u16 tx_coal;
+ u16 qid;
+ u8 padding[2];
+};
+
+struct vfpf_read_coal_req_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u16 qid;
+ u8 is_rx;
+ u8 padding[5];
+};
+
+struct pfvf_read_coal_resp_tlv {
+ struct pfvf_tlv hdr;
+ u16 coal;
+ u8 padding[6];
+};
+
+struct vfpf_bulletin_update_mac_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u8 mac[ETH_ALEN];
+ u8 padding[2];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct vfpf_update_tunn_param_tlv tunn_param_update;
+ struct vfpf_update_coalesce update_coalesce;
+ struct vfpf_read_coal_req_tlv read_coal_req;
+ struct vfpf_bulletin_update_mac_tlv bulletin_update_mac;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+ struct pfvf_update_tunn_param_tlv tunn_param_resp;
+ struct pfvf_read_coal_resp_tlv read_coal_resp;
+};
+
+enum qed_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+ /* Alert the VF that a suggested MAC was sent by the PF.
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
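+
+/* Consumers test these bits against 'valid_bitmap', e.g. (as in
+ * qed_vf_bulletin_get_forced_mac() in qed_vf.c):
+ *
+ * if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ * ... the PF enforced the MAC held in bulletin->mac ...
+ */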
+
+struct qed_bulletin_content {
+ /* crc of structure to ensure it is not in mid-update */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+ /* The following is a 'copy' of qed_mcp_link_state,
+ * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
+ * possible the structs will grow further down the road, we cannot
+ * embed them here; instead we need to carry all of their fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 padding4[2];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct qed_bulletin {
+ dma_addr_t phys;
+ struct qed_bulletin_content *p_virt;
+ u32 size;
+};
+
+enum {
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_UPDATE_TUNN_PARAM,
+ CHANNEL_TLV_COALESCE_UPDATE,
+ CHANNEL_TLV_QID,
+ CHANNEL_TLV_COALESCE_READ,
+ CHANNEL_TLV_BULLETIN_UPDATE_MAC,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+ * Will break in case of non-sequential vport-update tlvs.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
+
+/* Default number of CIDs [total of both Rx and Tx] to be requested
+ * by default, and maximum possible number.
+ */
+#define QED_ETH_VF_DEFAULT_NUM_CIDS (32)
+#define QED_ETH_VF_MAX_NUM_CIDS (250)
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ struct mutex mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct qed_bulletin bulletin;
+ struct qed_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+
+ /* In case PF originates prior to the fp-hsi version comparison,
+ * this has to be propagated as it affects the fastpath.
+ */
+ bool b_pre_fp_hsi;
+
+ /* Current day VFs are passing the SBs' physical addresses on vport
+ * start, and as they lack an IGU mapping they need to store the
+ * addresses of previously registered SBs.
+ * Even if we were to change configuration flow, due to backward
+ * compatibility [with older PFs] we'd still need to store these.
+ */
+ struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];
+
+ /* Determines whether VF utilizes doorbells via limited register
+ * bar or via the doorbell bar.
+ */
+ bool b_doorbell_bar;
+};
+
+/**
+ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the
+ * configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @rx_coal: coalesce value in microseconds for the Rx queue.
+ * @tx_coal: coalesce value in microseconds for the Tx queue.
+ * @p_cid: queue cid.
+ *
+ * Return: Int.
+ *
+ **/
+int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
+ u16 rx_coal,
+ u16 tx_coal, struct qed_queue_cid *p_cid);
+
+/**
+ * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
+ *
+ * @p_hwfn: HW device data.
+ * @p_coal: coalesce value in microseconds for VF queues.
+ * @p_cid: queue cid.
+ *
+ * Return: Int.
+ **/
+int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
+ u16 *p_coal, struct qed_queue_cid *p_cid);
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
+ *
+ * @p_hwfn: HW device data.
+ * @p_change: qed fills it with 1 if the bulletin board has changed, 0 otherwise.
+ *
+ * Return: enum _qed_status.
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * qed_vf_get_link_params(): Get link parameters for VF from qed
+ *
+ * @p_hwfn: HW device data.
+ * @params: the link params structure to be filled for the VF.
+ *
+ * Return: Void.
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params);
+
+/**
+ * qed_vf_get_link_state(): Get link state for VF from qed.
+ *
+ * @p_hwfn: HW device data.
+ * @link: the link state structure to be filled for the VF
+ *
+ * Return: Void.
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link);
+
+/**
+ * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
+ *
+ * @p_hwfn: HW device data.
+ * @p_link_caps: the link capabilities structure to be filled for the VF
+ *
+ * Return: Void.
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed
+ *
+ * @p_hwfn: HW device data.
+ * @num_rxqs: allocated RX queues
+ *
+ * Return: Void.
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed
+ *
+ * @p_hwfn: HW device data.
+ * @num_txqs: allocated TX queues
+ *
+ * Return: Void.
+ */
+void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
+
+/**
+ * qed_vf_get_num_cids(): Get number of available connections
+ * [both Rx and Tx] for VF
+ *
+ * @p_hwfn: HW device data.
+ * @num_cids: allocated number of connections
+ *
+ * Return: Void.
+ */
+void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
+
+/**
+ * qed_vf_get_port_mac(): Get port mac address for VF.
+ *
+ * @p_hwfn: HW device data.
+ * @port_mac: destination location for port mac
+ *
+ * Return: Void.
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
+ * for VF by qed.
+ *
+ * @p_hwfn: HW device data.
+ * @num_vlan_filters: allocated VLAN filters
+ *
+ * Return: Void.
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+/**
+ * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
+ * for VF by qed
+ *
+ * @p_hwfn: HW device data.
+ * @num_mac_filters: allocated MAC filters
+ *
+ * Return: Void.
+ */
+void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
+
+/**
+ * qed_vf_check_mac(): Check if VF can set a MAC address
+ *
+ * @p_hwfn: HW device data.
+ * @mac: Mac.
+ *
+ * Return: bool.
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * qed_vf_get_fw_version(): Set firmware version information
+ * in dev_info from the VF's acquire response TLV.
+ *
+ * @p_hwfn: HW device data.
+ * @fw_major: FW major.
+ * @fw_minor: FW minor.
+ * @fw_rev: FW rev.
+ * @fw_eng: FW eng.
+ *
+ * Return: Void.
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * qed_vf_hw_prepare(): hw preparation for VF - sends the ACQUIRE message.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: Only relative fields are relevant
+ * @bd_max_bytes: maximum number of bytes per bd
+ * @bd_chain_phys_addr: physical address of bd chain
+ * @cqe_pbl_addr: physical address of pbl
+ * @cqe_pbl_size: pbl size
+ * @pp_prod: pointer to the producer to be used in fastpath
+ *
+ * Return: Int.
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL Size.
+ * @pp_doorbell: pointer to the address to which the doorbell should be written.
+ *
+ * Return: Int.
+ */
+int
+qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @cqe_completion: CQE Completion.
+ *
+ * Return: Int.
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid, bool cqe_completion);
+
+/**
+ * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ *
+ * Return: Int.
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
+
+/**
+ * qed_vf_pf_vport_update(): VF - send a vport update command.
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: Params
+ *
+ * Return: Int.
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params);
+
+/**
+ * qed_vf_pf_reset(): VF - send a close message to PF.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: enum _qed_status
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_vf_pf_release(): VF - free the VF's memories.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: enum _qed_status
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given
+ * sb_id. For VFs, IGU SBs don't have to be contiguous.
+ *
+ * @p_hwfn: HW device data.
+ * @sb_id: SB ID.
+ *
+ * Return: u16.
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info.
+ *
+ * @p_hwfn: HW device data.
+ * @sb_id: zero-based SB index [for fastpath]
+ * @p_sb: may be NULL [during removal].
+ *
+ * Return: Void.
+ */
+void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
+ u16 sb_id, struct qed_sb_info *p_sb);
+
+/**
+ * qed_vf_pf_vport_start(): perform vport start for VF.
+ *
+ * @p_hwfn: HW device data.
+ * @vport_id: Vport ID.
+ * @mtu: MTU.
+ * @inner_vlan_removal: Inner VLAN removal.
+ * @tpa_mode: TPA mode
+ * @max_buffers_per_cqe: Max buffers per CQE.
+ * @only_untagged: default behavior regarding vlan acceptance
+ *
+ * Return: enum _qed_status
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * qed_vf_pf_vport_stop(): stop the VF's vport
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: enum _qed_status
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * qed_vf_pf_int_cleanup(): clean the SB of the VF
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: enum _qed_status
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * __qed_vf_get_link_params(): return the link params in a given bulletin board
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: pointer to a struct to fill with link params
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * __qed_vf_get_link_state(): return the link state in a given bulletin board
+ *
+ * @p_hwfn: HW device data.
+ * @p_link: pointer to a struct to fill with link state
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * __qed_vf_get_link_caps(): return the link capabilities in a given
+ * bulletin board
+ *
+ * @p_hwfn: HW device data.
+ * @p_link_caps: pointer to a struct to fill with link capabilities
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun);
+int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn);
+
+u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
+/**
+ * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in
+ * its bulletin board.
+ *
+ * @p_hwfn: HW device data.
+ * @p_mac: mac address to be updated in bulletin board
+ *
+ * Return: Int.
+ */
+int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac);
+
+#else
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs)
+{
+}
+
+static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+}
+
+static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_mac_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid,
+ bool cqe_completion)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
+ struct qed_queue_cid *p_cid)
+{
+ return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ return 0;
+}
+
+static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id,
+ struct qed_sb_info *p_sb)
+{
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param)
+{
+ return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params
+ *p_params,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+
+static inline void
+qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun)
+{
+}
+
+static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
+ struct qed_tunnel_info *p_tunn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
+ const u8 *p_mac)
+{
+ return -EINVAL;
+}
+
+static inline u32
+qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn,
+ enum BAR_ID bar_id)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
new file mode 100644
index 000000000..a6e8d9fc8
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+# Copyright (c) 2019-2020 Marvell International Ltd.
+
+obj-$(CONFIG_QEDE) := qede.o
+
+qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
+qede-$(CONFIG_QED_RDMA) += qede_rdma.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
new file mode 100644
index 000000000..8a63f99d4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QEDE_H_
+#define _QEDE_H_
+#include <linux/compiler.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/bpf.h>
+#include <net/xdp.h>
+#include <linux/qed/qede_rdma.h>
+#include <linux/io.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#include <linux/qed/common_hsi.h>
+#include <linux/qed/eth_common.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_chain.h>
+#include <linux/qed/qed_eth_if.h>
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+
+#define DRV_MODULE_SYM qede
+
+struct qede_stats_common {
+ u64 no_buff_discards;
+ u64 packet_too_big_discard;
+ u64 ttl0_discard;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 mftag_filter_discards;
+ u64 mac_filter_discards;
+ u64 gft_filter_drop;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_err_drop_pkts;
+ u64 coalesced_pkts;
+ u64 coalesced_events;
+ u64 coalesced_aborts_num;
+ u64 non_coalesced_pkts;
+ u64 coalesced_bytes;
+ u64 link_change_count;
+ u64 ptp_skip_txts;
+
+ /* port */
+ u64 rx_64_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_crc_errors;
+ u64 rx_mac_crtl_frames;
+ u64 rx_pause_frames;
+ u64 rx_pfc_frames;
+ u64 rx_align_errors;
+ u64 rx_carrier_errors;
+ u64 rx_oversize_packets;
+ u64 rx_jabbers;
+ u64 rx_undersize_packets;
+ u64 rx_fragments;
+ u64 tx_64_byte_packets;
+ u64 tx_65_to_127_byte_packets;
+ u64 tx_128_to_255_byte_packets;
+ u64 tx_256_to_511_byte_packets;
+ u64 tx_512_to_1023_byte_packets;
+ u64 tx_1024_to_1518_byte_packets;
+ u64 tx_pause_frames;
+ u64 tx_pfc_frames;
+ u64 brb_truncates;
+ u64 brb_discards;
+ u64 tx_mac_ctrl_frames;
+};
+
+struct qede_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct qede_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qede_stats {
+ struct qede_stats_common common;
+
+ union {
+ struct qede_stats_bb bb;
+ struct qede_stats_ah ah;
+ };
+};
+
+struct qede_vlan {
+ struct list_head list;
+ u16 vid;
+ bool configured;
+};
+
+struct qede_rdma_dev {
+ struct qedr_dev *qedr_dev;
+ struct list_head entry;
+ struct list_head rdma_event_list;
+ struct workqueue_struct *rdma_wq;
+ struct kref refcnt;
+ struct completion event_comp;
+ bool exp_recovery;
+};
+
+struct qede_ptp;
+
+#define QEDE_RFS_MAX_FLTR 256
+
+enum qede_flags_bit {
+ QEDE_FLAGS_IS_VF = 0,
+ QEDE_FLAGS_LINK_REQUESTED,
+ QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ QEDE_FLAGS_TX_TIMESTAMPING_EN
+};
+
+#define QEDE_DUMP_MAX_ARGS 4
+enum qede_dump_cmd {
+ QEDE_DUMP_CMD_NONE = 0,
+ QEDE_DUMP_CMD_NVM_CFG,
+ QEDE_DUMP_CMD_GRCDUMP,
+ QEDE_DUMP_CMD_MAX
+};
+
+struct qede_dump_info {
+ enum qede_dump_cmd cmd;
+ u8 num_args;
+ u32 args[QEDE_DUMP_MAX_ARGS];
+};
+
+struct qede_coalesce {
+ bool isvalid;
+ u16 rxc;
+ u16 txc;
+};
+
+struct qede_dev {
+ struct qed_dev *cdev;
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+ struct devlink *devlink;
+
+ u32 dp_module;
+ u8 dp_level;
+
+ unsigned long flags;
+#define IS_VF(edev) test_bit(QEDE_FLAGS_IS_VF, \
+ &(edev)->flags)
+
+ const struct qed_eth_ops *ops;
+ struct qede_ptp *ptp;
+ u64 ptp_skip_txts;
+
+ struct qed_dev_eth_info dev_info;
+#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
+#define QEDE_IS_BB(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
+#define QEDE_IS_AH(edev) \
+ ((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
+
+ struct qede_fastpath *fp_array;
+ struct qede_coalesce *coal_entry;
+ u8 req_num_tx;
+ u8 fp_num_tx;
+ u8 req_num_rx;
+ u8 fp_num_rx;
+ u16 req_queues;
+ u16 num_queues;
+ u16 total_xdp_queues;
+
+#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
+#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
+#define QEDE_RX_QUEUE_IDX(edev, i) (i)
+#define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
+
+ struct qed_int_info int_info;
+
+ /* Smaller private variant of the RTNL lock */
+ struct mutex qede_lock;
+ u32 state; /* Protected by qede_lock */
+ u16 rx_buf_size;
+ u32 rx_copybreak;
+
+ /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
+ /* Max supported alignment is 256 (a shift of 8); the minimal
+ * alignment shift of 6 is optimal for 57xxx HW performance.
+ */
+#define QEDE_RX_ALIGN_SHIFT max(6, min(8, L1_CACHE_SHIFT))
+ /* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define QEDE_FW_RX_ALIGN_END \
+ max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+ struct qede_stats stats;
+
+ /* Bitfield to track initialized RSS params */
+ u32 rss_params_inited;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+
+ u16 rss_ind_table[128];
+ u32 rss_key[10];
+ u8 rss_caps;
+
+ /* Both must be a power of two */
+ u16 q_num_rx_buffers;
+ u16 q_num_tx_buffers;
+
+ bool gro_disable;
+
+ struct list_head vlan_list;
+ u16 configured_vlans;
+ u16 non_configured_vlans;
+ bool accept_any_vlan;
+
+ struct delayed_work sp_task;
+ unsigned long sp_flags;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
+
+ struct qede_arfs *arfs;
+ bool wol_enabled;
+
+ struct qede_rdma_dev rdma_info;
+
+ struct bpf_prog *xdp_prog;
+
+ enum qed_hw_err_type last_err_type;
+ unsigned long err_flags;
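+/* Bit positions within err_flags, manipulated with set_bit()/test_bit() */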
+#define QEDE_ERR_IS_HANDLED 31
+#define QEDE_ERR_ATTN_CLR_EN 0
+#define QEDE_ERR_GET_DBG_INFO 1
+#define QEDE_ERR_IS_RECOVERABLE 2
+#define QEDE_ERR_WARN 3
+
+ struct qede_dump_info dump_info;
+ struct delayed_work periodic_task;
+ unsigned long stats_coal_ticks;
+ u32 stats_coal_usecs;
+ spinlock_t stats_lock; /* lock for vport stats access */
+};
+
+enum QEDE_STATE {
+ QEDE_STATE_CLOSED,
+ QEDE_STATE_OPEN,
+ QEDE_STATE_RECOVERY,
+};
+
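+/* Reassemble a 64-bit value (e.g. a DMA address) from its 32-bit halves */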
+#define HILO_U64(hi, lo) ((((u64)(hi)) << 32) + (lo))
+
+#define MAX_NUM_TC 8
+#define MAX_NUM_PRI 8
+
+/* The driver supports the new build_skb() API:
+ * RX ring buffer contains pointer to kmalloc() data only,
+ * skbs are built only after the frame has been DMA-ed.
+ */
+struct sw_rx_data {
+ struct page *data;
+ dma_addr_t mapping;
+ unsigned int page_offset;
+};
+
+enum qede_agg_state {
+ QEDE_AGG_STATE_NONE = 0,
+ QEDE_AGG_STATE_START = 1,
+ QEDE_AGG_STATE_ERROR = 2
+};
+
+struct qede_agg_info {
+ /* rx_buf is a data buffer that can be placed on / consumed from the
+ * rx bd chain. It has two purposes: first, we preallocate the data
+ * buffer for each aggregation when we open the interface and place
+ * this buffer on the rx-bd-ring when we receive TPA_START. We don't
+ * want to be in a state where allocation fails, as we can't reuse the
+ * consumer buffer in the rx-chain since FW may still be writing to it
+ * (the header needs to be modified for TPA).
+ * The second purpose is to keep a pointer to the bd buffer during
+ * aggregation.
+ */
+ struct sw_rx_data buffer;
+ struct sk_buff *skb;
+
+ /* We need some structs from the start cookie until termination */
+ u16 vlan_tag;
+
+ bool tpa_start_fail;
+ u8 state;
+ u8 frag_id;
+
+ u8 tunnel_type;
+};
+
+struct qede_rx_queue {
+ __le16 *hw_cons_ptr;
+ void __iomem *hw_rxq_prod_addr;
+
+ /* Required for the allocation of replacement buffers */
+ struct device *dev;
+
+ struct bpf_prog *xdp_prog;
+
+ u16 sw_rx_cons;
+ u16 sw_rx_prod;
+
+ u16 filled_buffers;
+ u8 data_direction;
+ u8 rxq_id;
+
+ /* Used once per NAPI run */
+ u16 num_rx_buffers;
+
+ u16 rx_headroom;
+
+ u32 rx_buf_size;
+ u32 rx_buf_seg_size;
+
+ struct sw_rx_data *sw_rx_ring;
+ struct qed_chain rx_bd_ring;
+ struct qed_chain rx_comp_ring ____cacheline_aligned;
+
+ /* GRO */
+ struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
+
+ /* Used once per NAPI run */
+ u64 rcv_pkts;
+
+ u64 rx_hw_errors;
+ u64 rx_alloc_errors;
+ u64 rx_ip_frags;
+
+ u64 xdp_no_pass;
+
+ void *handle;
+ struct xdp_rxq_info xdp_rxq;
+};
+
+union db_prod {
+ struct eth_db_data data;
+ u32 raw;
+};
+
+struct sw_tx_bd {
+ struct sk_buff *skb;
+ u8 flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define QEDE_TSO_SPLIT_BD BIT(0)
+};
+
+struct sw_tx_xdp {
+ struct page *page;
+ struct xdp_frame *xdpf;
+ dma_addr_t mapping;
+};
+
+struct qede_tx_queue {
+ u8 is_xdp;
+ bool is_legacy;
+ u16 sw_tx_cons;
+ u16 sw_tx_prod;
+ u16 num_tx_buffers; /* Slowpath only */
+
+ u64 xmit_pkts;
+ u64 stopped_cnt;
+ u64 tx_mem_alloc_err;
+
+ __le16 *hw_cons_ptr;
+
+ /* Needed for the mapping of packets */
+ struct device *dev;
+
+ void __iomem *doorbell_addr;
+ union db_prod tx_db;
+
+ /* Spinlock for XDP queues in case of XDP_REDIRECT */
+ spinlock_t xdp_tx_lock;
+
+ int index; /* Slowpath only */
+#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
+ QEDE_MAX_TSS_CNT(edev))
+#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
+#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx) ((edev)->fp_num_rx + \
+ ((idx) % QEDE_TSS_COUNT(edev)))
+#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx) ((idx) / QEDE_TSS_COUNT(edev))
+#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq) ((QEDE_TSS_COUNT(edev) * \
+ (txq)->cos) + (txq)->index)
+#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx) \
+ (&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq \
+ [QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
+#define QEDE_FP_TC0_TXQ(fp) (&((fp)->txq[0]))
+
+ /* Regular Tx requires the skb + metadata for release purposes,
+ * while XDP requires the pages and the mapped address.
+ */
+ union {
+ struct sw_tx_bd *skbs;
+ struct sw_tx_xdp *xdp;
+ } sw_tx_ring;
+
+ struct qed_chain tx_pbl;
+
+ /* Slowpath; should be kept at the end [unless missing padding] */
+ void *handle;
+ u16 cos;
+ u16 ndev_txq_id;
+};
+
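+/* Helpers for the DMA address and length stored in a Tx buffer descriptor */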
+#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
+ le32_to_cpu((bd)->addr.lo))
+#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len) \
+ do { \
+ (bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr)); \
+ (bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr)); \
+ (bd)->nbytes = cpu_to_le16(len); \
+ } while (0)
+#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
+
+struct qede_fastpath {
+ struct qede_dev *edev;
+
+ u8 type;
+#define QEDE_FASTPATH_TX BIT(0)
+#define QEDE_FASTPATH_RX BIT(1)
+#define QEDE_FASTPATH_XDP BIT(2)
+#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
+
+ u8 id;
+
+ u8 xdp_xmit;
+#define QEDE_XDP_TX BIT(0)
+#define QEDE_XDP_REDIRECT BIT(1)
+
+ struct napi_struct napi;
+ struct qed_sb_info *sb_info;
+ struct qede_rx_queue *rxq;
+ struct qede_tx_queue *txq;
+ struct qede_tx_queue *xdp_tx;
+
+ char name[IFNAMSIZ + 8];
+};
+
+/* Debug print definitions */
+#define DP_NAME(edev) netdev_name((edev)->ndev)
+
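+/* Per-packet transmit flags (L4 checksum, LSO, tunnel encapsulation) */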
+#define XMIT_PLAIN 0
+#define XMIT_L4_CSUM BIT(0)
+#define XMIT_LSO BIT(1)
+#define XMIT_ENC BIT(2)
+#define XMIT_ENC_GSO_L4_CSUM BIT(3)
+
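+/* Rx checksum validation results */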
+#define QEDE_CSUM_ERROR BIT(0)
+#define QEDE_CSUM_UNNECESSARY BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
+
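+/* Slow-path task flags; bit indices within edev->sp_flags */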
+#define QEDE_SP_RECOVERY 0
+#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RSVD1 2
+#define QEDE_SP_RSVD2 3
+#define QEDE_SP_HW_ERR 4
+#define QEDE_SP_ARFS_CONFIG 5
+#define QEDE_SP_AER 7
+#define QEDE_SP_DISABLE 8
+
+#ifdef CONFIG_RFS_ACCEL
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#define QEDE_SP_TASK_POLL_DELAY (5 * HZ)
+#endif
+
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
+void qede_free_arfs(struct qede_dev *edev);
+int qede_alloc_arfs(struct qede_dev *edev);
+int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
+int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
+int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
+ u32 *rule_locs);
+int qede_get_arfs_filter_count(struct qede_dev *edev);
+
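+/* Arguments for qede_reload(); 'func' applies the requested change
+ * (MTU, features or XDP program) as part of the reload.
+ */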
+struct qede_reload_args {
+ void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
+ union {
+ netdev_features_t features;
+ struct bpf_prog *new_prog;
+ u16 mtu;
+ } u;
+};
+
+/* Datapath functions definition */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+ struct xdp_frame **frames, u32 flags);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
+int qede_free_tx_pkt(struct qede_dev *edev,
+ struct qede_tx_queue *txq, int *len);
+int qede_poll(struct napi_struct *napi, int budget);
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
+
+/* Filtering function definitions */
+void qede_force_mac(void *dev, u8 *mac, bool forced);
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
+int qede_set_mac_addr(struct net_device *ndev, void *p);
+
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
+int qede_configure_vlan_filters(struct qede_dev *edev);
+
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int qede_set_features(struct net_device *dev, netdev_features_t features);
+void qede_set_rx_mode(struct net_device *ndev);
+void qede_config_rx_mode(struct net_device *ndev);
+void qede_fill_rss_params(struct qede_dev *edev,
+ struct qed_update_vport_rss_params *rss, u8 *update);
+
+void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
+void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);
+
+int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
+
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
+void qede_set_ethtool_ops(struct net_device *netdev);
+void qede_set_udp_tunnels(struct qede_dev *edev);
+void qede_reload(struct qede_dev *edev,
+ struct qede_reload_args *args, bool is_locked);
+int qede_change_mtu(struct net_device *dev, int new_mtu);
+void qede_fill_by_demand_stats(struct qede_dev *edev);
+void __qede_lock(struct qede_dev *edev);
+void __qede_unlock(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct flow_cls_offload *f);
+
+void qede_forced_speed_maps_init(void);
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
+
+#define RX_RING_SIZE_POW 13
+#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
+#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1)
+#define NUM_RX_BDS_MIN 128
+#define NUM_RX_BDS_KDUMP_MIN 63
+#define NUM_RX_BDS_DEF ((u16)BIT(10) - 1)
+
+#define TX_RING_SIZE_POW 13
+#define TX_RING_SIZE ((u16)BIT(TX_RING_SIZE_POW))
+#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1)
+#define NUM_TX_BDS_MIN 128
+#define NUM_TX_BDS_KDUMP_MIN 63
+#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+
+#define QEDE_MIN_PKT_LEN 64
+#define QEDE_RX_HDR_SIZE 256
+#define QEDE_MAX_JUMBO_PACKET_SIZE 9600
+#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
+#define for_each_cos_in_txq(edev, var) \
+ for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
+
+#endif /* _QEDE_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000..2763369bb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
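+/* The callbacks below are thin wrappers that forward each DCB operation
+ * to the qed core module through edev->ops->dcb.
+ */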
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+ pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+ up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+ u8 up)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *count)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+ int err;
+
+ err = dcb_ieee_setapp(netdev, app);
+ if (err)
+ return err;
+
+ return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+ .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+ .ieee_getets = qede_dcbnl_ieee_getets,
+ .ieee_setets = qede_dcbnl_ieee_setets,
+ .ieee_getapp = qede_dcbnl_ieee_getapp,
+ .ieee_setapp = qede_dcbnl_ieee_setapp,
+ .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+ .getstate = qede_dcbnl_getstate,
+ .setstate = qede_dcbnl_setstate,
+ .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+ .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qede_dcbnl_getpfccfg,
+ .setpfccfg = qede_dcbnl_setpfccfg,
+ .getcap = qede_dcbnl_getcap,
+ .getnumtcs = qede_dcbnl_getnumtcs,
+ .getpfcstate = qede_dcbnl_getpfcstate,
+ .getapp = qede_dcbnl_getapp,
+ .getdcbx = qede_dcbnl_getdcbx,
+ .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+ .setall = qede_dcbnl_setall,
+ .setnumtcs = qede_dcbnl_setnumtcs,
+ .setpfcstate = qede_dcbnl_setpfcstate,
+ .setapp = qede_dcbnl_setapp,
+ .setdcbx = qede_dcbnl_setdcbx,
+ .setfeatcfg = qede_dcbnl_setfeatcfg,
+ .getfeatcfg = qede_dcbnl_getfeatcfg,
+ .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+ .peer_getapptable = qede_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+ dev->dcbnl_ops = &qede_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
new file mode 100644
index 000000000..d0a3395b2
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -0,0 +1,2363 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/capability.h>
+#include <linux/vmalloc.h>
+#include <linux/phylink.h>
+
+#include "qede.h"
+#include "qede_ptp.h"
+
+#define QEDE_RQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_rx_queue, stat_name))
+#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_RQSTAT(stat_name) \
+ {QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+#define QEDE_DUMP_VERSION 0x1
+#define QEDE_DUMP_NVM_ARG_COUNT 2
+
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_rqstats_arr[] = {
+ QEDE_RQSTAT(rcv_pkts),
+ QEDE_RQSTAT(rx_hw_errors),
+ QEDE_RQSTAT(rx_alloc_errors),
+ QEDE_RQSTAT(rx_ip_frags),
+ QEDE_RQSTAT(xdp_no_pass),
+};
+
+#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
+#define QEDE_TQSTAT_OFFSET(stat_name) \
+ (offsetof(struct qede_tx_queue, stat_name))
+#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
+#define QEDE_TQSTAT(stat_name) \
+ {QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
+#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+} qede_tqstats_arr[] = {
+ QEDE_TQSTAT(xmit_pkts),
+ QEDE_TQSTAT(stopped_cnt),
+ QEDE_TQSTAT(tx_mem_alloc_err),
+};
+
+#define QEDE_STAT_OFFSET(stat_name, type, base) \
+ (offsetof(type, stat_name) + (base))
+#define QEDE_STAT_STRING(stat_name) (#stat_name)
+#define _QEDE_STAT(stat_name, type, base, attr) \
+ {QEDE_STAT_OFFSET(stat_name, type, base), \
+ QEDE_STAT_STRING(stat_name), \
+ attr}
+#define QEDE_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
+#define QEDE_PF_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_common, 0, \
+ BIT(QEDE_STAT_PF_ONLY))
+#define QEDE_PF_BB_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_bb, \
+ offsetof(struct qede_stats, bb), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
+#define QEDE_PF_AH_STAT(stat_name) \
+ _QEDE_STAT(stat_name, struct qede_stats_ah, \
+ offsetof(struct qede_stats, ah), \
+ BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
+static const struct {
+ u64 offset;
+ char string[ETH_GSTRING_LEN];
+ unsigned long attr;
+#define QEDE_STAT_PF_ONLY 0
+#define QEDE_STAT_BB_ONLY 1
+#define QEDE_STAT_AH_ONLY 2
+} qede_stats_arr[] = {
+ QEDE_STAT(rx_ucast_bytes),
+ QEDE_STAT(rx_mcast_bytes),
+ QEDE_STAT(rx_bcast_bytes),
+ QEDE_STAT(rx_ucast_pkts),
+ QEDE_STAT(rx_mcast_pkts),
+ QEDE_STAT(rx_bcast_pkts),
+
+ QEDE_STAT(tx_ucast_bytes),
+ QEDE_STAT(tx_mcast_bytes),
+ QEDE_STAT(tx_bcast_bytes),
+ QEDE_STAT(tx_ucast_pkts),
+ QEDE_STAT(tx_mcast_pkts),
+ QEDE_STAT(tx_bcast_pkts),
+
+ QEDE_PF_STAT(rx_64_byte_packets),
+ QEDE_PF_STAT(rx_65_to_127_byte_packets),
+ QEDE_PF_STAT(rx_128_to_255_byte_packets),
+ QEDE_PF_STAT(rx_256_to_511_byte_packets),
+ QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
+ QEDE_PF_STAT(tx_64_byte_packets),
+ QEDE_PF_STAT(tx_65_to_127_byte_packets),
+ QEDE_PF_STAT(tx_128_to_255_byte_packets),
+ QEDE_PF_STAT(tx_256_to_511_byte_packets),
+ QEDE_PF_STAT(tx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
+ QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
+ QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
+ QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
+ QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
+ QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
+ QEDE_PF_STAT(rx_mac_crtl_frames),
+ QEDE_PF_STAT(tx_mac_ctrl_frames),
+ QEDE_PF_STAT(rx_pause_frames),
+ QEDE_PF_STAT(tx_pause_frames),
+ QEDE_PF_STAT(rx_pfc_frames),
+ QEDE_PF_STAT(tx_pfc_frames),
+
+ QEDE_PF_STAT(rx_crc_errors),
+ QEDE_PF_STAT(rx_align_errors),
+ QEDE_PF_STAT(rx_carrier_errors),
+ QEDE_PF_STAT(rx_oversize_packets),
+ QEDE_PF_STAT(rx_jabbers),
+ QEDE_PF_STAT(rx_undersize_packets),
+ QEDE_PF_STAT(rx_fragments),
+ QEDE_PF_BB_STAT(tx_lpi_entry_count),
+ QEDE_PF_BB_STAT(tx_total_collisions),
+ QEDE_PF_STAT(brb_truncates),
+ QEDE_PF_STAT(brb_discards),
+ QEDE_STAT(no_buff_discards),
+ QEDE_PF_STAT(mftag_filter_discards),
+ QEDE_PF_STAT(mac_filter_discards),
+ QEDE_PF_STAT(gft_filter_drop),
+ QEDE_STAT(tx_err_drop_pkts),
+ QEDE_STAT(ttl0_discard),
+ QEDE_STAT(packet_too_big_discard),
+
+ QEDE_STAT(coalesced_pkts),
+ QEDE_STAT(coalesced_events),
+ QEDE_STAT(coalesced_aborts_num),
+ QEDE_STAT(non_coalesced_pkts),
+ QEDE_STAT(coalesced_bytes),
+
+ QEDE_STAT(link_change_count),
+ QEDE_STAT(ptp_skip_txts),
+};
+
+#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+#define QEDE_STAT_IS_PF_ONLY(i) \
+ test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_BB_ONLY(i) \
+ test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
+#define QEDE_STAT_IS_AH_ONLY(i) \
+ test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
+
+enum {
+ QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
+ QEDE_PRI_FLAG_RECOVER_ON_ERROR,
+ QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
+ QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
+ QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "Coupled-Function",
+ "SmartAN capable",
+ "Recover on error",
+ "ESL capable",
+ "ESL active",
+};
+
+enum qede_ethtool_tests {
+ QEDE_ETHTOOL_INT_LOOPBACK,
+ QEDE_ETHTOOL_INTERRUPT_TEST,
+ QEDE_ETHTOOL_MEMORY_TEST,
+ QEDE_ETHTOOL_REGISTER_TEST,
+ QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_NVRAM_TEST,
+ QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+ "Internal loopback (offline)",
+ "Interrupt (online)\t",
+ "Memory (online)\t\t",
+ "Register (online)\t",
+ "Clock (online)\t\t",
+ "Nvram (online)\t\t",
+};
+
+/* Forced speed capabilities maps */
+
+struct qede_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define QEDE_FORCED_SPEED_MAP(value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = qede_forced_speed_##value, \
+ .arr_size = ARRAY_SIZE(qede_forced_speed_##value), \
+}
+
+static const u32 qede_forced_speed_1000[] __initconst = {
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+};
+
+static const u32 qede_forced_speed_10000[] __initconst = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+};
+
+static const u32 qede_forced_speed_20000[] __initconst = {
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+};
+
+static const u32 qede_forced_speed_25000[] __initconst = {
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+};
+
+static const u32 qede_forced_speed_40000[] __initconst = {
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+};
+
+static const u32 qede_forced_speed_50000[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 qede_forced_speed_100000[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+};
+
+static struct qede_forced_speed_map qede_forced_speed_maps[] __ro_after_init = {
+ QEDE_FORCED_SPEED_MAP(1000),
+ QEDE_FORCED_SPEED_MAP(10000),
+ QEDE_FORCED_SPEED_MAP(20000),
+ QEDE_FORCED_SPEED_MAP(25000),
+ QEDE_FORCED_SPEED_MAP(40000),
+ QEDE_FORCED_SPEED_MAP(50000),
+ QEDE_FORCED_SPEED_MAP(100000),
+};
+
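+/* Expand each map's bit-index array into a linkmode mask once at init */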
+void __init qede_forced_speed_maps_init(void)
+{
+ struct qede_forced_speed_map *map;
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
+ map = qede_forced_speed_maps + i;
+
+ linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
+ map->cap_arr = NULL;
+ map->arr_size = 0;
+ }
+}
+
+/* Ethtool callbacks */
+
+static void qede_get_strings_stats_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, u8 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ if (txq->is_xdp)
+ sprintf(*buf, "%d [XDP]: %s",
+ QEDE_TXQ_XDP_TO_IDX(edev, txq),
+ qede_tqstats_arr[i].string);
+ else
+ sprintf(*buf, "%d_%d: %s", txq->index, txq->cos,
+ qede_tqstats_arr[i].string);
+ *buf += ETH_GSTRING_LEN;
+ }
+}
+
+static void qede_get_strings_stats_rxq(struct qede_dev *edev,
+ struct qede_rx_queue *rxq, u8 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+ sprintf(*buf, "%d: %s", rxq->rxq_id,
+ qede_rqstats_arr[i].string);
+ *buf += ETH_GSTRING_LEN;
+ }
+}
+
+static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
+{
+ return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
+ (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
+ (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
+}
+
+static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
+{
+ struct qede_fastpath *fp;
+ int i;
+
+ /* Account for queue statistics */
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_get_strings_stats_rxq(edev, fp->rxq, &buf);
+
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_strings_stats_txq(edev,
+ &fp->txq[cos], &buf);
+ }
+ }
+
+ /* Account for non-queue statistics */
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
+ if (qede_is_irrelevant_stat(edev, i))
+ continue;
+ strcpy(buf, qede_stats_arr[i].string);
+ buf += ETH_GSTRING_LEN;
+ }
+}
+
+static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ qede_get_strings_stats(edev, buf);
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, qede_private_arr,
+ ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, qede_tests_str_arr,
+ ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ break;
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ }
+}
+
+static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
+ **buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
+ (*buf)++;
+ }
+}
+
+static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
+{
+ int i;
+
+ for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
+ **buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
+ (*buf)++;
+ }
+}
+
+static void qede_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ int i;
+
+ qede_fill_by_demand_stats(edev);
+
+ /* Need to protect the access to the fastpath array */
+ __qede_lock(edev);
+
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+ fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_get_ethtool_stats_rxq(fp->rxq, &buf);
+
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
+ }
+ }
+
+ spin_lock(&edev->stats_lock);
+
+ for (i = 0; i < QEDE_NUM_STATS; i++) {
+ if (qede_is_irrelevant_stat(edev, i))
+ continue;
+ *buf = *((u64 *)(((void *)&edev->stats) +
+ qede_stats_arr[i].offset));
+
+ buf++;
+ }
+
+ spin_unlock(&edev->stats_lock);
+
+ __qede_unlock(edev);
+}
+
+static int qede_get_sset_count(struct net_device *dev, int stringset)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int num_stats = QEDE_NUM_STATS, i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_is_irrelevant_stat(edev, i))
+ num_stats--;
+
+ /* Account for the Regular Tx statistics */
+ num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
+ edev->dev_info.num_tc;
+
+ /* Account for the Regular Rx statistics */
+ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;
+
+ /* Account for XDP statistics [if needed] */
+ if (edev->xdp_prog)
+ num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
+ return num_stats;
+
+ case ETH_SS_PRIV_FLAGS:
+ return QEDE_PRI_FLAG_LEN;
+ case ETH_SS_TEST:
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Unsupported stringset 0x%08x\n", stringset);
+ return -EINVAL;
+ }
+}
+
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ bool esl_active;
+ u32 flags = 0;
+
+ if (edev->dev_info.common.num_hwfns > 1)
+ flags |= BIT(QEDE_PRI_FLAG_CMT);
+
+ if (edev->dev_info.common.smart_an)
+ flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);
+
+ if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
+ flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);
+
+ if (edev->dev_info.common.esl)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);
+
+ edev->ops->common->get_esl_status(edev->cdev, &esl_active);
+
+ if (esl_active)
+ flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);
+
+ return flags;
+}
+
+static int qede_set_priv_flags(struct net_device *dev, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 cflags = qede_get_priv_flags(dev);
+ u32 dflags = flags ^ cflags;
+
+ /* can only change RECOVER_ON_ERROR flag */
+ if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ return -EINVAL;
+
+ if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+ else
+ clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
+
+ return 0;
+}
+
+static int qede_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ typeof(cmd->link_modes) *link_modes = &cmd->link_modes;
+ struct ethtool_link_settings *base = &cmd->base;
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ __qede_lock(edev);
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ linkmode_copy(link_modes->supported, current_link.supported_caps);
+ linkmode_copy(link_modes->advertising, current_link.advertised_caps);
+ linkmode_copy(link_modes->lp_advertising, current_link.lp_caps);
+
+ if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
+ base->speed = current_link.speed;
+ base->duplex = current_link.duplex;
+ } else {
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
+ }
+
+ __qede_unlock(edev);
+
+ base->port = current_link.port;
+ base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int qede_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ const struct ethtool_link_settings *base = &cmd->base;
+ struct qede_dev *edev = netdev_priv(dev);
+ const struct qede_forced_speed_map *map;
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+ u32 i;
+
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+ memset(&current_link, 0, sizeof(current_link));
+ memset(&params, 0, sizeof(params));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;
+
+ if (base->autoneg == AUTONEG_ENABLE) {
+ if (!phylink_test(current_link.supported_caps, Autoneg)) {
+ DP_INFO(edev, "Auto negotiation is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ params.autoneg = true;
+ params.forced_speed = 0;
+
+ linkmode_copy(params.adv_speeds, cmd->link_modes.advertising);
+ } else { /* forced speed */
+ params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
+ params.autoneg = false;
+ params.forced_speed = base->speed;
+
+ for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
+ map = qede_forced_speed_maps + i;
+
+ if (base->speed != map->speed ||
+ !linkmode_intersects(current_link.supported_caps,
+ map->caps))
+ continue;
+
+ linkmode_and(params.adv_speeds,
+ current_link.supported_caps, map->caps);
+ goto set_link;
+ }
+
+ DP_INFO(edev, "Unsupported speed %u\n", base->speed);
+ return -EINVAL;
+ }
+
+set_link:
+ params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static void qede_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
+ struct qede_dev *edev = netdev_priv(ndev);
+ char mbi[ETHTOOL_FWVERS_LEN];
+
+ strscpy(info->driver, "qede", sizeof(info->driver));
+
+ snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ edev->dev_info.common.fw_major,
+ edev->dev_info.common.fw_minor,
+ edev->dev_info.common.fw_rev,
+ edev->dev_info.common.fw_eng);
+
+ snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
+ (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
+ (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
+ edev->dev_info.common.mfw_rev & 0xFF);
+
+ if ((strlen(storm) + strlen("[storm]")) <
+ sizeof(info->version))
+ snprintf(info->version, sizeof(info->version),
+ "[storm %s]", storm);
+ else
+ snprintf(info->version, sizeof(info->version),
+ "%s", storm);
+
+ if (edev->dev_info.common.mbi_version) {
+ snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "mbi %s [mfw %s]", mbi, mfw);
+ } else {
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "mfw %s", mfw);
+ }
+
+ strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
+}
+
+static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->dev_info.common.wol_support) {
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
+ }
+}
+
+static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ bool wol_requested;
+ int rc;
+
+ if (wol->wolopts & ~WAKE_MAGIC) {
+ DP_INFO(edev,
+ "Can't support WoL options other than magic-packet\n");
+ return -EINVAL;
+ }
+
+ wol_requested = !!(wol->wolopts & WAKE_MAGIC);
+ if (wol_requested == edev->wol_enabled)
+ return 0;
+
+ /* Need to actually change configuration */
+ if (!edev->dev_info.common.wol_support) {
+ DP_INFO(edev, "Device doesn't support WoL\n");
+ return -EINVAL;
+ }
+
+ rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
+ if (!rc)
+ edev->wol_enabled = wol_requested;
+
+ return rc;
+}
+
+static u32 qede_get_msglevel(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
+}
+
+static void qede_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ qede_config_debug(level, &dp_module, &dp_level);
+
+ edev->dp_level = dp_level;
+ edev->dp_module = dp_module;
+ edev->ops->common->update_msglvl(edev->cdev,
+ dp_module, dp_level);
+}
+
+static int qede_nway_reset(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+ struct qed_link_params link_params;
+
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!netif_running(dev))
+ return 0;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+ if (!current_link.link_up)
+ return 0;
+
+ /* Toggle the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ return 0;
+}
+
+static u32 qede_get_link(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ return current_link.link_up;
+}
+
+static int qede_flash_device(struct net_device *dev,
+ struct ethtool_flash *flash)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->common->nvm_flash(edev->cdev, flash->data);
+}
+
+static int qede_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ void *rx_handle = NULL, *tx_handle = NULL;
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 rx_coal, tx_coal, i, rc = 0;
+ struct qede_fastpath *fp;
+
+ rx_coal = QED_DEFAULT_RX_USECS;
+ tx_coal = QED_DEFAULT_TX_USECS;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rx_handle = fp->rxq->handle;
+ break;
+ }
+ }
+
+ rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle);
+ if (rc) {
+ DP_INFO(edev, "Read Rx coalesce error\n");
+ goto out;
+ }
+
+ for_each_queue(i) {
+ struct qede_tx_queue *txq;
+
+ fp = &edev->fp_array[i];
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq should suffice.
+ */
+ if (fp->type & QEDE_FASTPATH_TX) {
+ txq = QEDE_FP_TC0_TXQ(fp);
+ tx_handle = txq->handle;
+ break;
+ }
+ }
+
+ rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle);
+ if (rc)
+ DP_INFO(edev, "Read Tx coalesce error\n");
+ }
+
+out:
+ __qede_unlock(edev);
+
+ coal->rx_coalesce_usecs = rx_coal;
+ coal->tx_coalesce_usecs = tx_coal;
+ coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
+
+ return rc;
+}
+
+int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ int i, rc = 0;
+ u16 rxc, txc;
+
+ if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
+ edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
+ if (edev->stats_coal_usecs) {
+ edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
+ schedule_delayed_work(&edev->periodic_task, 0);
+
+ DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
+ edev->stats_coal_ticks);
+ } else {
+ cancel_delayed_work_sync(&edev->periodic_task);
+ }
+ }
+
+ if (!netif_running(dev)) {
+ DP_INFO(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" :
+ "tx", QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ rxc, 0,
+ fp->rxq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set RX coalesce error, rc = %d\n", rc);
+ return rc;
+ }
+ edev->coal_entry[i].rxc = rxc;
+ edev->coal_entry[i].isvalid = true;
+ }
+
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq;
+
+ /* All TX queues of a given fastpath use the same
+ * coalescing value, so there is no need to iterate
+ * over all TCs; the TC0 txq should suffice.
+ */
+ txq = QEDE_FP_TC0_TXQ(fp);
+
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ 0, txc,
+ txq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set TX coalesce error, rc = %d\n", rc);
+ return rc;
+ }
+ edev->coal_entry[i].txc = txc;
+ edev->coal_entry[i].isvalid = true;
+ }
+ }
+
+ return rc;
+}
+
+static void qede_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ ering->rx_max_pending = NUM_RX_BDS_MAX;
+ ering->rx_pending = edev->q_num_rx_buffers;
+ ering->tx_max_pending = NUM_TX_BDS_MAX;
+ ering->tx_pending = edev->q_num_tx_buffers;
+}
+
+static int qede_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
+ ering->rx_pending, ering->tx_pending);
+
+ /* Validate legality of configuration */
+ if (ering->rx_pending > NUM_RX_BDS_MAX ||
+ ering->rx_pending < NUM_RX_BDS_MIN ||
+ ering->tx_pending > NUM_TX_BDS_MAX ||
+ ering->tx_pending < NUM_TX_BDS_MIN) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Can only support Rx Buffer size [0%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
+ NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
+ NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
+ return -EINVAL;
+ }
+
+ /* Change ring size and re-load */
+ edev->q_num_rx_buffers = ering->rx_pending;
+ edev->q_num_tx_buffers = ering->tx_pending;
+
+ qede_reload(edev, NULL, false);
+
+ return 0;
+}
+
+static void qede_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ epause->autoneg = true;
+ if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ epause->rx_pause = true;
+ if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ epause->tx_pause = true;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
+ epause->cmd, epause->autoneg, epause->rx_pause,
+ epause->tx_pause);
+}
+
+static int qede_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_params params;
+ struct qed_link_output current_link;
+
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev,
+ "Pause settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG;
+
+ if (epause->autoneg) {
+ if (!phylink_test(current_link.supported_caps, Autoneg)) {
+ DP_INFO(edev, "autoneg not supported\n");
+ return -EINVAL;
+ }
+
+ params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
+ }
+
+ if (epause->rx_pause)
+ params.pause_config |= QED_LINK_PAUSE_RX_ENABLE;
+ if (epause->tx_pause)
+ params.pause_config |= QED_LINK_PAUSE_TX_ENABLE;
+
+ params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static void qede_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *buffer)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ regs->version = 0;
+ memset(buffer, 0, regs->len);
+
+ if (edev->ops && edev->ops->common)
+ edev->ops->common->dbg_all_data(edev->cdev, buffer);
+}
+
+static int qede_get_regs_len(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (edev->ops && edev->ops->common)
+ return edev->ops->common->dbg_all_data_size(edev->cdev);
+ else
+ return -EINVAL;
+}
+
+static void qede_update_mtu(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ edev->ndev->mtu = args->u.mtu;
+}
+
+/* Netdevice NDOs */
+int qede_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct qede_reload_args args;
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Configuring MTU size of %d\n", new_mtu);
+
+ if (new_mtu > PAGE_SIZE)
+ ndev->features &= ~NETIF_F_GRO_HW;
+
+ /* Set the mtu field and re-start the interface if needed */
+ args.u.mtu = new_mtu;
+ args.func = &qede_update_mtu;
+ qede_reload(edev, &args, false);
+#if IS_ENABLED(CONFIG_QED_RDMA)
+ qede_rdma_event_change_mtu(edev);
+#endif
+ edev->ops->common->update_mtu(edev->cdev, new_mtu);
+
+ return 0;
+}
+
+static void qede_get_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ channels->max_combined = QEDE_MAX_RSS_CNT(edev);
+ channels->max_rx = QEDE_MAX_RSS_CNT(edev);
+ channels->max_tx = QEDE_MAX_RSS_CNT(edev);
+ channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
+ edev->fp_num_rx;
+ channels->tx_count = edev->fp_num_tx;
+ channels->rx_count = edev->fp_num_rx;
+}
+
+static int qede_set_channels(struct net_device *dev,
+ struct ethtool_channels *channels)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 count;
+
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
+ channels->rx_count, channels->tx_count,
+ channels->other_count, channels->combined_count);
+
+ count = channels->rx_count + channels->tx_count +
+ channels->combined_count;
+
+ /* We don't support `other' channels */
+ if (channels->other_count) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "command parameters not supported\n");
+ return -EINVAL;
+ }
+
+ if (!(channels->combined_count || (channels->rx_count &&
+ channels->tx_count))) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "need to request at least one transmit and one receive channel\n");
+ return -EINVAL;
+ }
+
+ if (count > QEDE_MAX_RSS_CNT(edev)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "requested channels = %d max supported channels = %d\n",
+ count, QEDE_MAX_RSS_CNT(edev));
+ return -EINVAL;
+ }
+
+ /* Check if there was a change in the active parameters */
+ if ((count == QEDE_QUEUE_CNT(edev)) &&
+ (channels->tx_count == edev->fp_num_tx) &&
+ (channels->rx_count == edev->fp_num_rx)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "No change in active parameters\n");
+ return 0;
+ }
+
+ /* We need the number of queues to be divisible between the hwfns */
+ if ((count % edev->dev_info.common.num_hwfns) ||
+ (channels->tx_count % edev->dev_info.common.num_hwfns) ||
+ (channels->rx_count % edev->dev_info.common.num_hwfns)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Number of channels must be divisible by %04x\n",
+ edev->dev_info.common.num_hwfns);
+ return -EINVAL;
+ }
+
+ /* Set number of queues and reload if necessary */
+ edev->req_queues = count;
+ edev->req_num_tx = channels->tx_count;
+ edev->req_num_rx = channels->rx_count;
+ /* Reset the indirection table if rx queue count is updated */
+ if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
+ edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
+ memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
+ }
+
+ qede_reload(edev, NULL, false);
+
+ return 0;
+}
+
+static int qede_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return qede_ptp_get_ts_info(edev, info);
+}
+
+static int qede_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u8 led_state = 0;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ led_state = QED_LED_MODE_ON;
+ break;
+
+ case ETHTOOL_ID_OFF:
+ led_state = QED_LED_MODE_OFF;
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ led_state = QED_LED_MODE_RESTORE;
+ break;
+ }
+
+ edev->ops->common->set_led(edev->cdev, led_state);
+
+ return 0;
+}
+
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (edev->rss_caps & QED_RSS_IPV4_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (edev->rss_caps & QED_RSS_IPV6_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = QEDE_RSS_COUNT(edev);
+ break;
+ case ETHTOOL_GRXFH:
+ rc = qede_get_rss_flags(edev, info);
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = qede_get_arfs_filter_count(edev);
+ info->data = QEDE_RFS_MAX_FLTR;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ rc = qede_get_cls_rule_entry(edev, info);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ rc = qede_get_cls_rule_all(edev, info, rule_locs);
+ break;
+ default:
+ DP_ERR(edev, "Command parameters not supported\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct qed_update_vport_params *vport_update_params;
+ u8 set_caps = 0, clr_caps = 0;
+ int rc = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case UDP_V4_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* No action is needed if there is no change in the rss capability */
+ if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
+ return 0;
+
+ /* Update internal configuration */
+ edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+ /* Re-configure if possible */
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params) {
+ __qede_unlock(edev);
+ return -ENOMEM;
+ }
+ qede_fill_rss_params(edev, &vport_update_params->rss_params,
+ &vport_update_params->update_rss_flg);
+ rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+ vfree(vport_update_params);
+ }
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc;
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ rc = qede_set_rss_flags(edev, info);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ rc = qede_add_cls_rule(edev, info);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ rc = qede_delete_flow_filter(edev, info->fs.location);
+ break;
+ default:
+ DP_INFO(edev, "Command parameters not supported\n");
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+ return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return sizeof(edev->rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ indir[i] = edev->rss_ind_table[i];
+
+ if (key)
+ memcpy(key, edev->rss_key, qede_get_rxfh_key_size(dev));
+
+ return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct qed_update_vport_params *vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
+ int i, rc = 0;
+
+ if (edev->dev_info.common.num_hwfns > 1) {
+ DP_INFO(edev,
+ "RSS configuration is not supported for 100G devices\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir && !key)
+ return 0;
+
+ if (indir) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ edev->rss_ind_table[i] = indir[i];
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (key) {
+ memcpy(&edev->rss_key, key, qede_get_rxfh_key_size(dev));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params) {
+ __qede_unlock(edev);
+ return -ENOMEM;
+ }
+ qede_fill_rss_params(edev, &vport_update_params->rss_params,
+ &vport_update_params->update_rss_flg);
+ rc = edev->ops->vport_update(edev->cdev, vport_update_params);
+ vfree(vport_update_params);
+ }
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+ int i;
+
+ if (!netif_running(edev->ndev))
+ return;
+
+ for_each_queue(i) {
+ /* Update and reenable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i) {
+ napi_disable(&edev->fp_array[i].napi);
+ /* Disable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+ }
+}
+
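+/* Transmit a single skb on the first available Tx queue, bypassing the
+ * regular xmit path: one BD is produced, the doorbell is rung directly
+ * and the Tx completion is polled for up to QEDE_SELFTEST_POLL_COUNT
+ * iterations.
+ */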
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+ struct sk_buff *skb)
+{
+ struct qede_tx_queue *txq = NULL;
+ struct eth_tx_1st_bd *first_bd;
+ dma_addr_t mapping;
+ int i, idx;
+ u16 val;
+
+ for_each_queue(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ txq = QEDE_FP_TC0_TXQ(fp);
+ break;
+ }
+ }
+
+ if (!txq) {
+ DP_NOTICE(edev, "Tx path is not available\n");
+ return -1;
+ }
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
+ first_bd = qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ first_bd->data.bd_flags.bitfields = val;
+ val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+ val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+ first_bd->data.bitfields |= cpu_to_le16(val);
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ return -ENOMEM;
+ }
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = 1;
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
+ /* 'next page' entries are counted in the producer value */
+ val = qed_chain_get_prod_idx(&txq->tx_pbl);
+ txq->tx_db.data.bd_prod = cpu_to_le16(val);
+
+ /* wmb makes sure that the BD data is updated before updating the
+ * producer; otherwise the FW may read stale data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_txq_has_work(txq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_txq_has_work(txq)) {
+ DP_NOTICE(edev, "Tx completion didn't happen\n");
+ return -1;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+
+ return 0;
+}
+
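+/* Poll rx-queue 0 for the looped-back frame, verify both MAC addresses
+ * and the payload pattern, then recycle the consumed Rx buffer.
+ */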
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+ u16 sw_rx_index, len;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct qede_rx_queue *rxq = NULL;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ int i, iter, rc = 0;
+ u8 *data_ptr;
+
+ for_each_queue(i) {
+ if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
+ rxq = edev->fp_array[i].rxq;
+ break;
+ }
+ }
+
+ if (!rxq) {
+ DP_NOTICE(edev, "Rx path is not available\n");
+ return -1;
+ }
+
+ /* The packet is expected to be received on rx-queue 0 even though RSS
+ * is enabled, because queue 0 is configured as the default queue and
+ * the loopback traffic is not IP.
+ */
+ for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
+ if (!qede_has_rx_work(rxq)) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset +
+ sw_rx_data->page_offset +
+ rxq->rx_headroom);
+ if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
+ ether_addr_equal(data_ptr + ETH_ALEN,
+ edev->ndev->dev_addr)) {
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ rc = -1;
+ break;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ break;
+ }
+
+ DP_INFO(edev, "Not the transmitted packet\n");
+ qede_recycle_rx_bd_ring(rxq, 1);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ }
+
+ if (iter == QEDE_SELFTEST_POLL_COUNT) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+
+ qede_update_rx_prod(edev, rxq);
+
+ return rc;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+ struct qed_link_params link_params;
+ struct sk_buff *skb = NULL;
+ int rc = 0, i;
+ u32 pkt_size;
+ u8 *packet;
+
+ if (!netif_running(edev->ndev)) {
+ DP_NOTICE(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ qede_netif_stop(edev);
+
+ /* Bring up the link in Loopback mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = loopback_mode;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ /* Setting max packet size to 1.5K to avoid data being split over
+ * multiple BDs in cases where MTU > PAGE_SIZE.
+ */
+ pkt_size = (((edev->ndev->mtu < ETH_DATA_LEN) ?
+ edev->ndev->mtu : ETH_DATA_LEN) + ETH_HLEN);
+
+ skb = netdev_alloc_skb(edev->ndev, pkt_size);
+ if (!skb) {
+ DP_INFO(edev, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ ether_addr_copy(packet, edev->ndev->dev_addr);
+ ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+ memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+
+ rc = qede_selftest_transmit_traffic(edev, skb);
+ if (rc)
+ goto test_loopback_exit;
+
+ rc = qede_selftest_receive_traffic(edev);
+ if (rc)
+ goto test_loopback_exit;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+ dev_kfree_skb(skb);
+
+ /* Bring up the link in Normal mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ qede_netif_start(edev);
+
+ return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+ memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (qede_selftest_run_loopback(edev,
+ QED_LINK_LOOPBACK_INT_PHY)) {
+ buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+ buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+ buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+ buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+ buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
+ buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+static int qede_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Invalid rx copy break value, range is [%u, %u]",
+ QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+ return -EINVAL;
+ }
+
+ edev->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = edev->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ if (!current_link.eee_supported) {
+ DP_INFO(edev, "EEE is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (current_link.eee.adv_caps & QED_EEE_1G_ADV)
+ edata->advertised = ADVERTISED_1000baseT_Full;
+ if (current_link.eee.adv_caps & QED_EEE_10G_ADV)
+ edata->advertised |= ADVERTISED_10000baseT_Full;
+ if (current_link.sup_caps & QED_EEE_1G_ADV)
+ edata->supported = ADVERTISED_1000baseT_Full;
+ if (current_link.sup_caps & QED_EEE_10G_ADV)
+ edata->supported |= ADVERTISED_10000baseT_Full;
+ if (current_link.eee.lp_adv_caps & QED_EEE_1G_ADV)
+ edata->lp_advertised = ADVERTISED_1000baseT_Full;
+ if (current_link.eee.lp_adv_caps & QED_EEE_10G_ADV)
+ edata->lp_advertised |= ADVERTISED_10000baseT_Full;
+
+ edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
+ edata->eee_enabled = current_link.eee.enable;
+ edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable;
+ edata->eee_active = current_link.eee_active;
+
+ return 0;
+}
+
+static int qede_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output current_link;
+ struct qed_link_params params;
+
+ if (!edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&current_link, 0, sizeof(current_link));
+ edev->ops->common->get_link(edev->cdev, &current_link);
+
+ if (!current_link.eee_supported) {
+ DP_INFO(edev, "EEE is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;
+
+ if (!(edata->advertised & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_10000baseT_Full)) ||
+ ((edata->advertised & (ADVERTISED_1000baseT_Full |
+ ADVERTISED_10000baseT_Full)) !=
+ edata->advertised)) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Invalid advertised capabilities %d\n",
+ edata->advertised);
+ return -EINVAL;
+ }
+
+ if (edata->advertised & ADVERTISED_1000baseT_Full)
+ params.eee.adv_caps = QED_EEE_1G_ADV;
+ if (edata->advertised & ADVERTISED_10000baseT_Full)
+ params.eee.adv_caps |= QED_EEE_10G_ADV;
+ params.eee.enable = edata->eee_enabled;
+ params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
+ params.eee.tx_lpi_timer = edata->tx_lpi_timer;
+
+ params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
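+/* Translate the QED_FEC_MODE_* bitmask reported by qed into the
+ * corresponding ETHTOOL_FEC_* bitmask, and vice versa below.
+ */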
+static u32 qede_link_to_ethtool_fec(u32 link_fec)
+{
+ u32 eth_fec = 0;
+
+ if (link_fec & QED_FEC_MODE_NONE)
+ eth_fec |= ETHTOOL_FEC_OFF;
+ if (link_fec & QED_FEC_MODE_FIRECODE)
+ eth_fec |= ETHTOOL_FEC_BASER;
+ if (link_fec & QED_FEC_MODE_RS)
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (link_fec & QED_FEC_MODE_AUTO)
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (link_fec & QED_FEC_MODE_UNSUPPORTED)
+ eth_fec |= ETHTOOL_FEC_NONE;
+
+ return eth_fec;
+}
+
+static u32 qede_ethtool_to_link_fec(u32 eth_fec)
+{
+ u32 link_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ link_fec |= QED_FEC_MODE_NONE;
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ link_fec |= QED_FEC_MODE_FIRECODE;
+ if (eth_fec & ETHTOOL_FEC_RS)
+ link_fec |= QED_FEC_MODE_RS;
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ link_fec |= QED_FEC_MODE_AUTO;
+ if (eth_fec & ETHTOOL_FEC_NONE)
+ link_fec |= QED_FEC_MODE_UNSUPPORTED;
+
+ return link_fec;
+}
+
+static int qede_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_output curr_link;
+
+ memset(&curr_link, 0, sizeof(curr_link));
+ edev->ops->common->get_link(edev->cdev, &curr_link);
+
+ fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec);
+ fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec);
+
+ return 0;
+}
+
+static int qede_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_link_params params;
+
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev, "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG;
+ params.fec = qede_ethtool_to_link_fec(fecparam->fec);
+ params.link_up = true;
+
+ edev->ops->common->set_link(edev->cdev, &params);
+
+ return 0;
+}
+
+static int qede_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u8 buf[4];
+ int rc;
+
+ /* Read the first 4 bytes to find the SFP type */
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0, 0, 4);
+ if (rc) {
+ DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
+ return rc;
+ }
+
+ switch (buf[0]) {
+ case 0x3: /* SFP, SFP+, SFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case 0xc: /* QSFP */
+ case 0xd: /* QSFP+ */
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case 0x11: /* QSFP-28 */
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
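+/* Read the module EEPROM in up to two chunks: the A0 page for offsets
+ * below ETH_MODULE_SFF_8079_LEN and the A2 page for the remainder, so a
+ * single request may span both I2C device addresses.
+ */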
+static int qede_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 start_addr = ee->offset, size = 0;
+ u8 *buf = data;
+ int rc = 0;
+
+ /* Read A0 section */
+ if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
+ /* Limit transfer size to the A0 section boundary */
+ if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
+ size = ETH_MODULE_SFF_8079_LEN - ee->offset;
+ else
+ size = ee->len;
+
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A0,
+ start_addr, size);
+ if (rc) {
+ DP_ERR(edev, "Failed reading A0 section %d\n", rc);
+ return rc;
+ }
+
+ buf += size;
+ start_addr += size;
+ }
+
+ /* Read A2 section */
+ if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
+ start_addr < ETH_MODULE_SFF_8472_LEN) {
+ size = ee->len - size;
+ /* Limit transfer size to the A2 section boundary */
+ if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
+ size = ETH_MODULE_SFF_8472_LEN - start_addr;
+ start_addr -= ETH_MODULE_SFF_8079_LEN;
+ rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
+ QED_I2C_DEV_ADDR_A2,
+ start_addr, size);
+ if (rc) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Failed reading A2 section %d\n", rc);
+ return 0;
+ }
+ }
+
+ return rc;
+}
+
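+/* ethtool set-dump is used as a small state machine: the first call
+ * selects the dump command, and subsequent calls either collect its
+ * arguments (NVM_CFG) or are forwarded to set_grc_config (GRCDUMP).
+ * The state is reset once the dump data has been read.
+ */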
+static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) {
+ if (val->flag > QEDE_DUMP_CMD_MAX) {
+ DP_ERR(edev, "Invalid command %d\n", val->flag);
+ return -EINVAL;
+ }
+ edev->dump_info.cmd = val->flag;
+ edev->dump_info.num_args = 0;
+ return 0;
+ }
+
+ if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) {
+ DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args);
+ return -EINVAL;
+ }
+
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ edev->dump_info.args[edev->dump_info.num_args] = val->flag;
+ edev->dump_info.num_args++;
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ rc = edev->ops->common->set_grc_config(edev->cdev,
+ val->flag, 1);
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+static int qede_get_dump_flag(struct net_device *dev,
+ struct ethtool_dump *dump)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops || !edev->ops->common) {
+ DP_ERR(edev, "Edev ops not populated\n");
+ return -EINVAL;
+ }
+
+ dump->version = QEDE_DUMP_VERSION;
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ dump->flag = QEDE_DUMP_CMD_NVM_CFG;
+ dump->len = edev->ops->common->read_nvm_cfg_len(edev->cdev,
+ edev->dump_info.args[0]);
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ dump->flag = QEDE_DUMP_CMD_GRCDUMP;
+ dump->len = edev->ops->common->dbg_all_data_size(edev->cdev);
+ break;
+ default:
+ DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "dump->version = 0x%x dump->flag = %d dump->len = %d\n",
+ dump->version, dump->flag, dump->len);
+ return 0;
+}
+
+static int qede_get_dump_data(struct net_device *dev,
+ struct ethtool_dump *dump, void *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int rc = 0;
+
+ if (!edev->ops || !edev->ops->common) {
+ DP_ERR(edev, "Edev ops not populated\n");
+ rc = -EINVAL;
+ goto err;
+ }
+
+ switch (edev->dump_info.cmd) {
+ case QEDE_DUMP_CMD_NVM_CFG:
+ if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) {
+ DP_ERR(edev, "Arg count = %d required = %d\n",
+ edev->dump_info.num_args,
+ QEDE_DUMP_NVM_ARG_COUNT);
+ rc = -EINVAL;
+ goto err;
+ }
+ rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf,
+ edev->dump_info.args[0],
+ edev->dump_info.args[1]);
+ break;
+ case QEDE_DUMP_CMD_GRCDUMP:
+ memset(buf, 0, dump->len);
+ rc = edev->ops->common->dbg_all_data(edev->cdev, buf);
+ break;
+ default:
+ DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
+ rc = -EINVAL;
+ break;
+ }
+
+err:
+ edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
+ edev->dump_info.num_args = 0;
+ memset(edev->dump_info.args, 0, sizeof(edev->dump_info.args));
+
+ return rc;
+}
+
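+/* Apply Rx/Tx interrupt coalescing for a single queue and cache the
+ * accepted values in edev->coal_entry[] for that queue.
+ */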
+int qede_set_per_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rxc, txc;
+ int rc = 0;
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ rxc, 0,
+ fp->rxq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set RX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].rxc = rxc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+
+ if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
+ rc = edev->ops->common->set_coalesce(edev->cdev,
+ 0, txc,
+ fp->txq->handle);
+ if (rc) {
+ DP_INFO(edev,
+ "Set TX coalesce error, rc = %d\n", rc);
+ goto out;
+ }
+ edev->coal_entry[queue].txc = txc;
+ edev->coal_entry[queue].isvalid = true;
+ }
+out:
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_get_per_coalesce(struct net_device *dev,
+ u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ void *rx_handle = NULL, *tx_handle = NULL;
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_fastpath *fp;
+ u16 rx_coal, tx_coal;
+ int rc = 0;
+
+ rx_coal = QED_DEFAULT_RX_USECS;
+ tx_coal = QED_DEFAULT_TX_USECS;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+ __qede_lock(edev);
+ if (queue >= edev->num_queues) {
+ DP_INFO(edev, "Invalid queue\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ rx_handle = fp->rxq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
+ rx_handle);
+ if (rc) {
+ DP_INFO(edev, "Read Rx coalesce error\n");
+ goto out;
+ }
+
+ fp = &edev->fp_array[queue];
+ if (fp->type & QEDE_FASTPATH_TX)
+ tx_handle = fp->txq->handle;
+
+ rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
+ tx_handle);
+ if (rc)
+ DP_INFO(edev, "Read Tx coalesce error\n");
+
+out:
+ __qede_unlock(edev);
+
+ coal->rx_coalesce_usecs = rx_coal;
+ coal->tx_coalesce_usecs = tx_coal;
+
+ return rc;
+}
+
+static const struct ethtool_ops qede_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .set_link_ksettings = qede_set_link_ksettings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_regs_len = qede_get_regs_len,
+ .get_regs = qede_get_regs,
+ .get_wol = qede_get_wol,
+ .set_wol = qede_set_wol,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .nway_reset = qede_nway_reset,
+ .get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_pauseparam = qede_get_pauseparam,
+ .set_pauseparam = qede_set_pauseparam,
+ .get_strings = qede_get_strings,
+ .set_phys_id = qede_set_phys_id,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .set_priv_flags = qede_set_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_ts_info = qede_get_ts_info,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+ .get_module_info = qede_get_module_info,
+ .get_module_eeprom = qede_get_module_eeprom,
+ .get_eee = qede_get_eee,
+ .set_eee = qede_set_eee,
+ .get_fecparam = qede_get_fecparam,
+ .set_fecparam = qede_set_fecparam,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
+ .flash_device = qede_flash_device,
+ .get_dump_flag = qede_get_dump_flag,
+ .get_dump_data = qede_get_dump_data,
+ .set_dump = qede_set_dump,
+};
+
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_STATS_BLOCK_USECS,
+ .get_link_ksettings = qede_get_link_ksettings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .get_per_queue_coalesce = qede_get_per_coalesce,
+ .set_per_queue_coalesce = qede_set_per_coalesce,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
+};
+
+void qede_set_ethtool_ops(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (IS_VF(edev))
+ dev->ethtool_ops = &qede_vf_ethtool_ops;
+ else
+ dev->ethtool_ops = &qede_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
new file mode 100644
index 000000000..3010833dd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -0,0 +1,2074 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/udp_tunnel.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+
+#define QEDE_FILTER_PRINT_MAX_LEN (64)
+struct qede_arfs_tuple {
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 eth_proto;
+ u8 ip_proto;
+
+ /* Describe filtering mode needed for this kind of filter */
+ enum qed_filter_config_mode mode;
+
+ /* Used to compare new/old filters. Return true if IPs match */
+ bool (*ip_comp)(struct qede_arfs_tuple *a, struct qede_arfs_tuple *b);
+
+ /* Given a pointer to the ethhdr, build a header from the tuple info */
+ void (*build_hdr)(struct qede_arfs_tuple *t, void *header);
+
+ /* Stringify the tuple for printing into the provided buffer */
+ void (*stringify)(struct qede_arfs_tuple *t, void *buffer);
+};
+
+struct qede_arfs_fltr_node {
+#define QEDE_FLTR_VALID 0
+ unsigned long state;
+
+ /* pointer to aRFS packet buffer */
+ void *data;
+
+ /* dma map address of aRFS packet buffer */
+ dma_addr_t mapping;
+
+ /* length of aRFS packet buffer */
+ int buf_len;
+
+ /* tuple extracted from the aRFS packet buffer */
+ struct qede_arfs_tuple tuple;
+
+ u32 flow_id;
+ u64 sw_id;
+ u16 rxq_id;
+ u16 next_rxq_id;
+ u8 vfid;
+ bool filter_op;
+ bool used;
+ u8 fw_rc;
+ bool b_is_drop;
+ struct hlist_node node;
+};
+
+struct qede_arfs {
+#define QEDE_ARFS_BUCKET_HEAD(edev, idx) (&(edev)->arfs->arfs_hl_head[idx])
+#define QEDE_ARFS_POLL_COUNT 100
+#define QEDE_RFS_FLW_BITSHIFT (4)
+#define QEDE_RFS_FLW_MASK ((1 << QEDE_RFS_FLW_BITSHIFT) - 1)
+ struct hlist_head arfs_hl_head[1 << QEDE_RFS_FLW_BITSHIFT];
+
+ /* lock for filter list access */
+ spinlock_t arfs_list_lock;
+ unsigned long *arfs_fltr_bmap;
+ int filter_count;
+
+ /* Currently configured filtering mode */
+ enum qed_filter_config_mode mode;
+};
+
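+/* Issue an add or delete request to qed for one aRFS filter node and
+ * mark it as in-flight (n->used); the outcome is handled in
+ * qede_arfs_filter_op() below.
+ */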
+static void qede_configure_arfs_fltr(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *n,
+ u16 rxq_id, bool add_fltr)
+{
+ const struct qed_eth_ops *op = edev->ops;
+ struct qed_ntuple_filter_params params;
+
+ if (n->used)
+ return;
+
+ memset(&params, 0, sizeof(params));
+
+ params.addr = n->mapping;
+ params.length = n->buf_len;
+ params.qid = rxq_id;
+ params.b_is_add = add_fltr;
+ params.b_is_drop = n->b_is_drop;
+
+ if (n->vfid) {
+ params.b_is_vf = true;
+ params.vf_id = n->vfid - 1;
+ }
+
+ if (n->tuple.stringify) {
+ char tuple_buffer[QEDE_FILTER_PRINT_MAX_LEN];
+
+ n->tuple.stringify(&n->tuple, tuple_buffer);
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
+ "%s sw_id[0x%llx]: %s [vf %u queue %d]\n",
+ add_fltr ? "Adding" : "Deleting",
+ n->sw_id, tuple_buffer, n->vfid, rxq_id);
+ }
+
+ n->used = true;
+ n->filter_op = add_fltr;
+ op->ntuple_filter_config(edev->cdev, n, &params);
+}
+
+static void
+qede_free_arfs_filter(struct qede_dev *edev, struct qede_arfs_fltr_node *fltr)
+{
+ kfree(fltr->data);
+
+ if (fltr->sw_id < QEDE_RFS_MAX_FLTR)
+ clear_bit(fltr->sw_id, edev->arfs->arfs_fltr_bmap);
+
+ kfree(fltr);
+}
+
+static int
+qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *fltr,
+ u16 bucket_idx)
+{
+ fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data,
+ fltr->buf_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) {
+ DP_NOTICE(edev, "Failed to map DMA memory for rule\n");
+ qede_free_arfs_filter(edev, fltr);
+ return -ENOMEM;
+ }
+
+ INIT_HLIST_NODE(&fltr->node);
+ hlist_add_head(&fltr->node,
+ QEDE_ARFS_BUCKET_HEAD(edev, bucket_idx));
+
+ edev->arfs->filter_count++;
+ if (edev->arfs->filter_count == 1 &&
+ edev->arfs->mode == QED_FILTER_CONFIG_MODE_DISABLE) {
+ edev->ops->configure_arfs_searcher(edev->cdev,
+ fltr->tuple.mode);
+ edev->arfs->mode = fltr->tuple.mode;
+ }
+
+ return 0;
+}
+
+static void
+qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *fltr)
+{
+ hlist_del(&fltr->node);
+ dma_unmap_single(&edev->pdev->dev, fltr->mapping,
+ fltr->buf_len, DMA_TO_DEVICE);
+
+ qede_free_arfs_filter(edev, fltr);
+
+ edev->arfs->filter_count--;
+ if (!edev->arfs->filter_count &&
+ edev->arfs->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+ enum qed_filter_config_mode mode;
+
+ mode = QED_FILTER_CONFIG_MODE_DISABLE;
+ edev->ops->configure_arfs_searcher(edev->cdev, mode);
+ edev->arfs->mode = QED_FILTER_CONFIG_MODE_DISABLE;
+ }
+}
+
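+/* Completion handler for an aRFS filter configuration request: on failure
+ * the filter is invalidated; on success its VALID bit tracks whether the
+ * filter was added or removed, and a follow-up request is issued if the
+ * target Rx queue changed in the meantime.
+ */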
+void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc)
+{
+ struct qede_arfs_fltr_node *fltr = filter;
+ struct qede_dev *edev = dev;
+
+ fltr->fw_rc = fw_rc;
+
+ if (fw_rc) {
+ DP_NOTICE(edev,
+ "Failed arfs filter configuration fw_rc=%d, flow_id=%d, sw_id=0x%llx, src_port=%d, dst_port=%d, rxq=%d\n",
+ fw_rc, fltr->flow_id, fltr->sw_id,
+ ntohs(fltr->tuple.src_port),
+ ntohs(fltr->tuple.dst_port), fltr->rxq_id);
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return;
+ }
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ fltr->used = false;
+
+ if (fltr->filter_op) {
+ set_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id)
+ qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id,
+ false);
+ } else {
+ clear_bit(QEDE_FLTR_VALID, &fltr->state);
+ if (fltr->rxq_id != fltr->next_rxq_id) {
+ fltr->rxq_id = fltr->next_rxq_id;
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id, true);
+ }
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+}
+
+/* Should be called while qede_lock is held */
+void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
+{
+ int i;
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++) {
+ struct hlist_node *temp;
+ struct hlist_head *head;
+ struct qede_arfs_fltr_node *fltr;
+
+ head = &edev->arfs->arfs_hl_head[i];
+
+ hlist_for_each_entry_safe(fltr, temp, head, node) {
+ bool del = false;
+
+ if (edev->state != QEDE_STATE_OPEN)
+ del = true;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if ((!test_bit(QEDE_FLTR_VALID, &fltr->state) &&
+ !fltr->used) || free_fltr) {
+ qede_dequeue_fltr_and_config_searcher(edev,
+ fltr);
+ } else {
+ bool flow_exp = false;
+#ifdef CONFIG_RFS_ACCEL
+ flow_exp = rps_may_expire_flow(edev->ndev,
+ fltr->rxq_id,
+ fltr->flow_id,
+ fltr->sw_id);
+#endif
+ if ((flow_exp || del) && !free_fltr)
+ qede_configure_arfs_fltr(edev, fltr,
+ fltr->rxq_id,
+ false);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ }
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ if (edev->arfs->filter_count) {
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task,
+ QEDE_SP_TASK_POLL_DELAY);
+ }
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+#endif
+}
+
+/* This function waits until all aRFS filters get deleted and freed.
+ * On timeout it frees all filters forcefully.
+ */
+void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev)
+{
+ int count = QEDE_ARFS_POLL_COUNT;
+
+ while (count) {
+ qede_process_arfs_filters(edev, false);
+
+ if (!edev->arfs->filter_count)
+ break;
+
+ msleep(100);
+ count--;
+ }
+
+ if (!count) {
+ DP_NOTICE(edev, "Timeout in polling for arfs filter free\n");
+
+ /* Something is terribly wrong, free forcefully */
+ qede_process_arfs_filters(edev, true);
+ }
+}
+
+int qede_alloc_arfs(struct qede_dev *edev)
+{
+ int i;
+
+ if (!edev->dev_info.common.b_arfs_capable)
+ return -EINVAL;
+
+ edev->arfs = vzalloc(sizeof(*edev->arfs));
+ if (!edev->arfs)
+ return -ENOMEM;
+
+ spin_lock_init(&edev->arfs->arfs_list_lock);
+
+ for (i = 0; i <= QEDE_RFS_FLW_MASK; i++)
+ INIT_HLIST_HEAD(QEDE_ARFS_BUCKET_HEAD(edev, i));
+
+ edev->arfs->arfs_fltr_bmap =
+ vzalloc(array_size(sizeof(long),
+ BITS_TO_LONGS(QEDE_RFS_MAX_FLTR)));
+ if (!edev->arfs->arfs_fltr_bmap) {
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+
+#ifdef CONFIG_RFS_ACCEL
+ edev->ndev->rx_cpu_rmap = alloc_irq_cpu_rmap(QEDE_RSS_COUNT(edev));
+ if (!edev->ndev->rx_cpu_rmap) {
+ vfree(edev->arfs->arfs_fltr_bmap);
+ edev->arfs->arfs_fltr_bmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+ return -ENOMEM;
+ }
+#endif
+ return 0;
+}
+
+void qede_free_arfs(struct qede_dev *edev)
+{
+ if (!edev->arfs)
+ return;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+#endif
+ vfree(edev->arfs->arfs_fltr_bmap);
+ edev->arfs->arfs_fltr_bmap = NULL;
+ vfree(edev->arfs);
+ edev->arfs = NULL;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static bool qede_compare_ip_addr(struct qede_arfs_fltr_node *tpos,
+ const struct sk_buff *skb)
+{
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (tpos->tuple.src_ipv4 == ip_hdr(skb)->saddr &&
+ tpos->tuple.dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ else
+ return false;
+ } else {
+ struct in6_addr *src = &tpos->tuple.src_ipv6;
+ u8 size = sizeof(struct in6_addr);
+
+ if (!memcmp(src, &ipv6_hdr(skb)->saddr, size) &&
+ !memcmp(&tpos->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr, size))
+ return true;
+ else
+ return false;
+ }
+}
+
+static struct qede_arfs_fltr_node *
+qede_arfs_htbl_key_search(struct hlist_head *h, const struct sk_buff *skb,
+ __be16 src_port, __be16 dst_port, u8 ip_proto)
+{
+ struct qede_arfs_fltr_node *tpos;
+
+ hlist_for_each_entry(tpos, h, node)
+ if (tpos->tuple.ip_proto == ip_proto &&
+ tpos->tuple.eth_proto == skb->protocol &&
+ qede_compare_ip_addr(tpos, skb) &&
+ tpos->tuple.src_port == src_port &&
+ tpos->tuple.dst_port == dst_port)
+ return tpos;
+
+ return NULL;
+}
+
+static struct qede_arfs_fltr_node *
+qede_alloc_filter(struct qede_dev *edev, int min_hlen)
+{
+ struct qede_arfs_fltr_node *n;
+ int bit_id;
+
+ bit_id = find_first_zero_bit(edev->arfs->arfs_fltr_bmap,
+ QEDE_RFS_MAX_FLTR);
+
+ if (bit_id >= QEDE_RFS_MAX_FLTR)
+ return NULL;
+
+ n = kzalloc(sizeof(*n), GFP_ATOMIC);
+ if (!n)
+ return NULL;
+
+ n->data = kzalloc(min_hlen, GFP_ATOMIC);
+ if (!n->data) {
+ kfree(n);
+ return NULL;
+ }
+
+ n->sw_id = (u16)bit_id;
+ set_bit(bit_id, edev->arfs->arfs_fltr_bmap);
+ return n;
+}
+
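+/* .ndo_rx_flow_steer callback: look up the flow's 5-tuple in the aRFS
+ * hash table and retarget an existing filter to the new Rx queue, or
+ * allocate a new filter node, copy the packet headers into its buffer
+ * and schedule it for configuration.
+ */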
+int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc, tp_offset;
+ struct ethhdr *eth;
+ __be16 *ports;
+ u16 tbl_idx;
+ u8 ip_proto;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ ip_proto = ip_hdr(skb)->protocol;
+ tp_offset = sizeof(struct iphdr);
+ } else {
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ tp_offset = sizeof(struct ipv6hdr);
+ }
+
+ if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP)
+ return -EPROTONOSUPPORT;
+
+ ports = (__be16 *)(skb->data + tp_offset);
+ tbl_idx = skb_get_hash_raw(skb) & QEDE_RFS_FLW_MASK;
+
+ spin_lock_bh(&edev->arfs->arfs_list_lock);
+
+ n = qede_arfs_htbl_key_search(QEDE_ARFS_BUCKET_HEAD(edev, tbl_idx),
+ skb, ports[0], ports[1], ip_proto);
+ if (n) {
+ /* Filter match */
+ n->next_rxq_id = rxq_index;
+
+ if (test_bit(QEDE_FLTR_VALID, &n->state)) {
+ if (n->rxq_id != rxq_index)
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ false);
+ } else {
+ if (!n->used) {
+ n->rxq_id = rxq_index;
+ qede_configure_arfs_fltr(edev, n, n->rxq_id,
+ true);
+ }
+ }
+
+ rc = n->sw_id;
+ goto ret_unlock;
+ }
+
+ min_hlen = ETH_HLEN + skb_headlen(skb);
+
+ n = qede_alloc_filter(edev, min_hlen);
+ if (!n) {
+ rc = -ENOMEM;
+ goto ret_unlock;
+ }
+
+ n->buf_len = min_hlen;
+ n->rxq_id = rxq_index;
+ n->next_rxq_id = rxq_index;
+ n->tuple.src_port = ports[0];
+ n->tuple.dst_port = ports[1];
+ n->flow_id = flow_id;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ n->tuple.src_ipv4 = ip_hdr(skb)->saddr;
+ n->tuple.dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&n->tuple.src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&n->tuple.dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+
+ eth = (struct ethhdr *)n->data;
+ eth->h_proto = skb->protocol;
+ n->tuple.eth_proto = skb->protocol;
+ n->tuple.ip_proto = ip_proto;
+ n->tuple.mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ memcpy(n->data + ETH_HLEN, skb->data, skb_headlen(skb));
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, tbl_idx);
+ if (rc)
+ goto ret_unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+
+ set_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ return n->sw_id;
+
+ret_unlock:
+ spin_unlock_bh(&edev->arfs->arfs_list_lock);
+ return rc;
+}
+#endif
+
+void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->vxlan_dst_port != vxlan_port)
+ edev->vxlan_dst_port = 0;
+
+ if (edev->geneve_dst_port != geneve_port)
+ edev->geneve_dst_port = 0;
+}
+
+void qede_force_mac(void *dev, u8 *mac, bool forced)
+{
+ struct qede_dev *edev = dev;
+
+ __qede_lock(edev);
+
+ if (!is_valid_ether_addr(mac)) {
+ __qede_unlock(edev);
+ return;
+ }
+
+ eth_hw_addr_set(edev->ndev, mac);
+ __qede_unlock(edev);
+}
+
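+/* Build the RSS section of a vport-update request. Defaults are filled
+ * in for anything the user has not configured yet: an indirection table
+ * spread over the Rx queues, a random hash key and TCP/IP hash caps.
+ */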
+void qede_fill_rss_params(struct qede_dev *edev,
+ struct qed_update_vport_rss_params *rss, u8 *update)
+{
+ bool need_reset = false;
+ int i;
+
+ if (QEDE_RSS_COUNT(edev) <= 1) {
+ memset(rss, 0, sizeof(*rss));
+ *update = 0;
+ return;
+ }
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_ind_table[i] >= QEDE_RSS_COUNT(edev)) {
+ need_reset = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) || need_reset) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val, val;
+
+ val = QEDE_RSS_COUNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ /* Now that we have the queue-indirection, prepare the handles */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 idx = QEDE_RX_QUEUE_IDX(edev, edev->rss_ind_table[i]);
+
+ rss->rss_ind_table[i] = edev->fp_array[idx].rxq->handle;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_key, sizeof(edev->rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+ memcpy(rss->rss_key, edev->rss_key, sizeof(rss->rss_key));
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_caps = QED_RSS_IPV4 | QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+ rss->rss_caps = edev->rss_caps;
+
+ *update = 1;
+}
+
+static int qede_set_ucast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ const unsigned char mac[ETH_ALEN])
+{
+ struct qed_filter_ucast_params ucast;
+
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.mac_valid = 1;
+ ether_addr_copy(ucast.mac, mac);
+
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
+}
+
+static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ u16 vid)
+{
+ struct qed_filter_ucast_params ucast;
+
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.vlan_valid = 1;
+ ucast.vlan = vid;
+
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
+}
+
+static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
+{
+ struct qed_update_vport_params *params;
+ int rc;
+
+ /* Proceed only if action actually needs to be performed */
+ if (edev->accept_any_vlan == action)
+ return 0;
+
+ params = vzalloc(sizeof(*params));
+ if (!params)
+ return -ENOMEM;
+
+ params->vport_id = 0;
+ params->accept_any_vlan = action;
+ params->update_accept_any_vlan_flg = 1;
+
+ rc = edev->ops->vport_update(edev->cdev, params);
+ if (rc) {
+ DP_ERR(edev, "Failed to %s accept-any-vlan\n",
+ action ? "enable" : "disable");
+ } else {
+ DP_INFO(edev, "%s accept-any-vlan\n",
+ action ? "enabled" : "disabled");
+ edev->accept_any_vlan = action;
+ }
+
+ vfree(params);
+ return 0;
+}
+
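+/* Add a VLAN filter. If the device is out of VLAN filter credits, the
+ * VLAN is kept on the list as non-configured and accept-any-VLAN mode is
+ * enabled instead; vlan0 has a reserved filter and never consumes quota.
+ */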
+int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan, *tmp;
+ int rc = 0;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan) {
+ DP_INFO(edev, "Failed to allocate struct for vlan\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&vlan->list);
+ vlan->vid = vid;
+ vlan->configured = false;
+
+ /* Verify vlan isn't already configured */
+ list_for_each_entry(tmp, &edev->vlan_list, list) {
+ if (tmp->vid == vlan->vid) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "vlan already configured\n");
+ kfree(vlan);
+ return -EEXIST;
+ }
+ }
+
+ /* If interface is down, cache this VLAN ID and return */
+ __qede_lock(edev);
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, VLAN %d will be configured when interface is up\n",
+ vid);
+ if (vid != 0)
+ edev->non_configured_vlans++;
+ list_add(&vlan->list, &edev->vlan_list);
+ goto out;
+ }
+
+ /* Check for the filter limit.
+ * Note - vlan0 has a reserved filter and can be added without
+ * worrying about quota
+ */
+ if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
+ (vlan->vid == 0)) {
+ rc = qede_set_ucast_rx_vlan(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %d\n",
+ vlan->vid);
+ kfree(vlan);
+ goto out;
+ }
+ vlan->configured = true;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0)
+ edev->configured_vlans++;
+ } else {
+ /* Out of quota; Activate accept-any-VLAN mode */
+ if (!edev->non_configured_vlans) {
+ rc = qede_config_accept_any_vlan(edev, true);
+ if (rc) {
+ kfree(vlan);
+ goto out;
+ }
+ }
+
+ edev->non_configured_vlans++;
+ }
+
+ list_add(&vlan->list, &edev->vlan_list);
+
+out:
+ __qede_unlock(edev);
+ return rc;
+}
+
+static void qede_del_vlan_from_list(struct qede_dev *edev,
+ struct qede_vlan *vlan)
+{
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ if (vlan->configured)
+ edev->configured_vlans--;
+ else
+ edev->non_configured_vlans--;
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+int qede_configure_vlan_filters(struct qede_dev *edev)
+{
+ int rc = 0, real_rc = 0, accept_any_vlan = 0;
+ struct qed_dev_eth_info *dev_info;
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return 0;
+
+ dev_info = &edev->dev_info;
+
+ /* Configure non-configured vlans */
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (vlan->configured)
+ continue;
+
+ /* We have used all our credits, now enable accept_any_vlan */
+ if ((vlan->vid != 0) &&
+ (edev->configured_vlans == dev_info->num_vlan_filters)) {
+ accept_any_vlan = 1;
+ continue;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
+
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
+ vlan->vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to configure VLAN %u\n",
+ vlan->vid);
+ real_rc = rc;
+ continue;
+ }
+
+ vlan->configured = true;
+ /* vlan0 filter doesn't consume our VLAN filter's quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans--;
+ edev->configured_vlans++;
+ }
+ }
+
+ /* enable accept_any_vlan mode if we have more VLANs than credits,
+ * or remove accept_any_vlan mode if we've actually removed
+ * a non-configured vlan, and all remaining vlans are truly configured.
+ */
+
+ if (accept_any_vlan)
+ rc = qede_config_accept_any_vlan(edev, true);
+ else if (!edev->non_configured_vlans)
+ rc = qede_config_accept_any_vlan(edev, false);
+
+ if (rc && !real_rc)
+ real_rc = rc;
+
+ return real_rc;
+}
+
+int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_vlan *vlan;
+ int rc = 0;
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
+
+ /* Find whether entry exists */
+ __qede_lock(edev);
+ list_for_each_entry(vlan, &edev->vlan_list, list)
+ if (vlan->vid == vid)
+ break;
+
+ if (list_entry_is_head(vlan, &edev->vlan_list, list)) {
+ DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
+ "Vlan isn't configured\n");
+ goto out;
+ }
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ /* As the interface is already down, there is no VPORT instance
+ * to remove the vlan filter from; just update the vlan list.
+ */
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Interface is down, removing VLAN from list only\n");
+ qede_del_vlan_from_list(edev, vlan);
+ goto out;
+ }
+
+ /* Remove vlan */
+ if (vlan->configured) {
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+ vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ goto out;
+ }
+ }
+
+ qede_del_vlan_from_list(edev, vlan);
+
+ /* We have removed a VLAN - try to see if we can
+ * configure non-configured VLAN from the list.
+ */
+ rc = qede_configure_vlan_filters(edev);
+
+out:
+ __qede_unlock(edev);
+ return rc;
+}
+
+void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
+{
+ struct qede_vlan *vlan = NULL;
+
+ if (list_empty(&edev->vlan_list))
+ return;
+
+ list_for_each_entry(vlan, &edev->vlan_list, list) {
+ if (!vlan->configured)
+ continue;
+
+ vlan->configured = false;
+
+ /* vlan0 filter isn't consuming out of our quota */
+ if (vlan->vid != 0) {
+ edev->non_configured_vlans++;
+ edev->configured_vlans--;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "marked vlan %d as non-configured\n", vlan->vid);
+ }
+
+ edev->accept_any_vlan = false;
+}
+
+static void qede_set_features_reload(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ edev->ndev->features = args->u.features;
+}
+
+netdev_features_t qede_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (edev->xdp_prog || edev->ndev->mtu > PAGE_SIZE ||
+ !(features & NETIF_F_GRO))
+ features &= ~NETIF_F_GRO_HW;
+
+ return features;
+}
+
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ if (changes & NETIF_F_GRO_HW)
+ need_reload = true;
+
+ if (need_reload) {
+ struct qede_reload_args args;
+
+ args.u.features = features;
+ args.func = &qede_set_features_reload;
+
+		/* Make sure that we definitely need to reload.
+		 * With an attached eBPF program there are no FW
+		 * aggregations, so there is no need to actually reload.
+		 */
+ __qede_lock(edev);
+ if (edev->xdp_prog)
+ args.func(edev, &args);
+ else
+ qede_reload(edev, &args, true);
+ __qede_unlock(edev);
+
+ return 1;
+ }
+
+ return 0;
+}
+
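+/* udp_tunnel_nic sync callback: the core passes the table whose (single) entry
+ * changed; depending on the tunnel type we update either the VXLAN or the
+ * GENEVE UDP destination port in the device and cache it in the qede_dev.
+ */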
+static int qede_udp_tunnel_sync(struct net_device *dev, unsigned int table)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qed_tunn_params tunn_params;
+ struct udp_tunnel_info ti;
+ u16 *save_port;
+ int rc;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ if (ti.type == UDP_TUNNEL_TYPE_VXLAN) {
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = ntohs(ti.port);
+ save_port = &edev->vxlan_dst_port;
+ } else {
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = ntohs(ti.port);
+ save_port = &edev->geneve_dst_port;
+ }
+
+ __qede_lock(edev);
+ rc = edev->ops->tunn_config(edev->cdev, &tunn_params);
+ __qede_unlock(edev);
+ if (rc)
+ return rc;
+
+ *save_port = ntohs(ti.port);
+ return 0;
+}
+
+static const struct udp_tunnel_nic_info qede_udp_tunnels_both = {
+ .sync_table = qede_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+}, qede_udp_tunnels_vxlan = {
+ .sync_table = qede_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+}, qede_udp_tunnels_geneve = {
+ .sync_table = qede_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
+ },
+};
+
+void qede_set_udp_tunnels(struct qede_dev *edev)
+{
+ if (edev->dev_info.common.vxlan_enable &&
+ edev->dev_info.common.geneve_enable)
+ edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_both;
+ else if (edev->dev_info.common.vxlan_enable)
+ edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_vxlan;
+ else if (edev->dev_info.common.geneve_enable)
+ edev->ndev->udp_tunnel_nic_info = &qede_udp_tunnels_geneve;
+}
+
+static void qede_xdp_reload_func(struct qede_dev *edev,
+ struct qede_reload_args *args)
+{
+ struct bpf_prog *old;
+
+ old = xchg(&edev->xdp_prog, args->u.new_prog);
+ if (old)
+ bpf_prog_put(old);
+}
+
+static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
+{
+ struct qede_reload_args args;
+
+ /* If we're called, there was already a bpf reference increment */
+ args.func = &qede_xdp_reload_func;
+ args.u.new_prog = prog;
+ qede_reload(edev, &args, false);
+
+ return 0;
+}
+
+int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return qede_xdp_set(edev, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int qede_set_mcast_rx_mac(struct qede_dev *edev,
+ enum qed_filter_xcast_params_type opcode,
+ unsigned char *mac, int num_macs)
+{
+ struct qed_filter_mcast_params mcast;
+ int i;
+
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.type = opcode;
+ mcast.num = num_macs;
+
+ for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
+ ether_addr_copy(mcast.mac[i], mac);
+
+ return edev->ops->filter_config_mcast(edev->cdev, &mcast);
+}
+
+int qede_set_mac_addr(struct net_device *ndev, void *p)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+ int rc = 0;
+
+ /* Make sure the state doesn't transition while changing the MAC.
+ * Also, all flows accessing the dev_addr field are doing that under
+ * this lock.
+ */
+ __qede_lock(edev);
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ DP_NOTICE(edev, "The MAC address is not valid\n");
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC %pM\n",
+ addr->sa_data);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (edev->state == QEDE_STATE_OPEN) {
+ /* Remove the previous primary mac */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ ndev->dev_addr);
+ if (rc)
+ goto out;
+ }
+
+ eth_hw_addr_set(ndev, addr->sa_data);
+ DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
+
+ if (edev->state != QEDE_STATE_OPEN) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "The device is currently down\n");
+ /* Ask PF to explicitly update a copy in bulletin board */
+ if (IS_VF(edev) && edev->ops->req_bulletin_update_mac)
+ edev->ops->req_bulletin_update_mac(edev->cdev,
+ ndev->dev_addr);
+ goto out;
+ }
+
+ edev->ops->common->update_mac(edev->cdev, ndev->dev_addr);
+
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ ndev->dev_addr);
+out:
+ __qede_unlock(edev);
+ return rc;
+}
+
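+/* The device supports at most 64 exact multicast MAC filters; beyond that, or
+ * when IFF_ALLMULTI is set, fall back to multicast-promiscuous mode instead.
+ */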
+static int
+qede_configure_mcast_filtering(struct net_device *ndev,
+ enum qed_filter_rx_mode_type *accept_flags)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *mc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc = 0, mc_count;
+ size_t size;
+
+ size = 64 * ETH_ALEN;
+
+ mc_macs = kzalloc(size, GFP_KERNEL);
+ if (!mc_macs) {
+ DP_NOTICE(edev,
+ "Failed to allocate memory for multicast MACs\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+
+ temp = mc_macs;
+
+	/* Remove all previously configured multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
+ mc_macs, 1);
+ if (rc)
+ goto exit;
+
+ netif_addr_lock_bh(ndev);
+
+ mc_count = netdev_mc_count(ndev);
+ if (mc_count <= 64) {
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Check for all multicast @@@TBD resource allocation */
+ if ((ndev->flags & IFF_ALLMULTI) || (mc_count > 64)) {
+ if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
+ *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
+ } else {
+ /* Add all multicast MAC filters */
+ rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
+ mc_macs, mc_count);
+ }
+
+exit:
+ kfree(mc_macs);
+ return rc;
+}
+
+void qede_set_rx_mode(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+/* Must be called with qede_lock held */
+void qede_config_rx_mode(struct net_device *ndev)
+{
+ enum qed_filter_rx_mode_type accept_flags;
+ struct qede_dev *edev = netdev_priv(ndev);
+ unsigned char *uc_macs, *temp;
+ struct netdev_hw_addr *ha;
+ int rc, uc_count;
+ size_t size;
+
+ netif_addr_lock_bh(ndev);
+
+ uc_count = netdev_uc_count(ndev);
+ size = uc_count * ETH_ALEN;
+
+ uc_macs = kzalloc(size, GFP_ATOMIC);
+ if (!uc_macs) {
+ DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
+ netif_addr_unlock_bh(ndev);
+ return;
+ }
+
+ temp = uc_macs;
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(temp, ha->addr);
+ temp += ETH_ALEN;
+ }
+
+ netif_addr_unlock_bh(ndev);
+
+ /* Remove all previous unicast secondary macs and multicast macs
+ * (configure / leave the primary mac)
+ */
+ rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
+ edev->ndev->dev_addr);
+ if (rc)
+ goto out;
+
+ /* Check for promiscuous */
+ if (ndev->flags & IFF_PROMISC)
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ else
+ accept_flags = QED_FILTER_RX_MODE_TYPE_REGULAR;
+
+ /* Configure all filters regardless, in case promisc is rejected */
+ if (uc_count < edev->dev_info.num_mac_filters) {
+ int i;
+
+ temp = uc_macs;
+ for (i = 0; i < uc_count; i++) {
+ rc = qede_set_ucast_rx_mac(edev,
+ QED_FILTER_XCAST_TYPE_ADD,
+ temp);
+ if (rc)
+ goto out;
+
+ temp += ETH_ALEN;
+ }
+ } else {
+ accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
+ }
+
+ rc = qede_configure_mcast_filtering(ndev, &accept_flags);
+ if (rc)
+ goto out;
+
+ /* take care of VLAN mode */
+ if (ndev->flags & IFF_PROMISC) {
+ qede_config_accept_any_vlan(edev, true);
+ } else if (!edev->non_configured_vlans) {
+ /* It's possible that accept_any_vlan mode is set due to a
+ * previous setting of IFF_PROMISC. If vlan credits are
+ * sufficient, disable accept_any_vlan.
+ */
+ qede_config_accept_any_vlan(edev, false);
+ }
+
+ edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
+out:
+ kfree(uc_macs);
+}
+
+static struct qede_arfs_fltr_node *
+qede_get_arfs_fltr_by_loc(struct hlist_head *head, u64 location)
+{
+ struct qede_arfs_fltr_node *fltr;
+
+ hlist_for_each_entry(fltr, head, node)
+ if (location == fltr->sw_id)
+ return fltr;
+
+ return NULL;
+}
+
+int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ struct qede_arfs_fltr_node *fltr;
+ struct hlist_head *head;
+ int cnt = 0, rc = 0;
+
+ info->data = QEDE_RFS_MAX_FLTR;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
+
+ hlist_for_each_entry(fltr, head, node) {
+ if (cnt == info->rule_cnt) {
+ rc = -EMSGSIZE;
+ goto unlock;
+ }
+
+ rule_locs[cnt] = fltr->sw_id;
+ cnt++;
+ }
+
+ info->rule_cnt = cnt;
+
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
+
+int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct qede_arfs_fltr_node *fltr = NULL;
+ int rc = 0;
+
+ cmd->data = QEDE_RFS_MAX_FLTR;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
+ fsp->location);
+ if (!fltr) {
+ DP_NOTICE(edev, "Rule not found - location=0x%x\n",
+ fsp->location);
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ if (fltr->tuple.eth_proto == htons(ETH_P_IP)) {
+ if (fltr->tuple.ip_proto == IPPROTO_TCP)
+ fsp->flow_type = TCP_V4_FLOW;
+ else
+ fsp->flow_type = UDP_V4_FLOW;
+
+ fsp->h_u.tcp_ip4_spec.psrc = fltr->tuple.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = fltr->tuple.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = fltr->tuple.src_ipv4;
+ fsp->h_u.tcp_ip4_spec.ip4dst = fltr->tuple.dst_ipv4;
+ } else {
+ if (fltr->tuple.ip_proto == IPPROTO_TCP)
+ fsp->flow_type = TCP_V6_FLOW;
+ else
+ fsp->flow_type = UDP_V6_FLOW;
+ fsp->h_u.tcp_ip6_spec.psrc = fltr->tuple.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = fltr->tuple.dst_port;
+ memcpy(&fsp->h_u.tcp_ip6_spec.ip6src,
+ &fltr->tuple.src_ipv6, sizeof(struct in6_addr));
+ memcpy(&fsp->h_u.tcp_ip6_spec.ip6dst,
+ &fltr->tuple.dst_ipv6, sizeof(struct in6_addr));
+ }
+
+ fsp->ring_cookie = fltr->rxq_id;
+
+ if (fltr->vfid) {
+ fsp->ring_cookie |= ((u64)fltr->vfid) <<
+ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ }
+
+ if (fltr->b_is_drop)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
+
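+/* Filter (de)configuration completes asynchronously; poll for up to
+ * QEDE_ARFS_POLL_COUNT iterations (20ms each) until the filter is no longer
+ * marked as in-use, and report the firmware return code.
+ */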
+static int
+qede_poll_arfs_filter_config(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *fltr)
+{
+ int count = QEDE_ARFS_POLL_COUNT;
+
+ while (fltr->used && count) {
+ msleep(20);
+ count--;
+ }
+
+ if (count == 0 || fltr->fw_rc) {
+ DP_NOTICE(edev, "Timeout in polling filter config\n");
+ qede_dequeue_fltr_and_config_searcher(edev, fltr);
+ return -EIO;
+ }
+
+ return fltr->fw_rc;
+}
+
+static int qede_flow_get_min_header_size(struct qede_arfs_tuple *t)
+{
+ int size = ETH_HLEN;
+
+ if (t->eth_proto == htons(ETH_P_IP))
+ size += sizeof(struct iphdr);
+ else
+ size += sizeof(struct ipv6hdr);
+
+ if (t->ip_proto == IPPROTO_TCP)
+ size += sizeof(struct tcphdr);
+ else
+ size += sizeof(struct udphdr);
+
+ return size;
+}
+
+static bool qede_flow_spec_ipv4_cmp(struct qede_arfs_tuple *a,
+ struct qede_arfs_tuple *b)
+{
+ if (a->eth_proto != htons(ETH_P_IP) ||
+ b->eth_proto != htons(ETH_P_IP))
+ return false;
+
+ return (a->src_ipv4 == b->src_ipv4) &&
+ (a->dst_ipv4 == b->dst_ipv4);
+}
+
+static void qede_flow_build_ipv4_hdr(struct qede_arfs_tuple *t,
+ void *header)
+{
+ __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct iphdr));
+ struct iphdr *ip = (struct iphdr *)(header + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)header;
+
+ eth->h_proto = t->eth_proto;
+ ip->saddr = t->src_ipv4;
+ ip->daddr = t->dst_ipv4;
+ ip->version = 0x4;
+ ip->ihl = 0x5;
+ ip->protocol = t->ip_proto;
+ ip->tot_len = cpu_to_be16(qede_flow_get_min_header_size(t) - ETH_HLEN);
+
+ /* ports is weakly typed to suit both TCP and UDP ports */
+ ports[0] = t->src_port;
+ ports[1] = t->dst_port;
+}
+
+static void qede_flow_stringify_ipv4_hdr(struct qede_arfs_tuple *t,
+ void *buffer)
+{
+ const char *prefix = t->ip_proto == IPPROTO_TCP ? "TCP" : "UDP";
+
+ snprintf(buffer, QEDE_FILTER_PRINT_MAX_LEN,
+ "%s %pI4 (%04x) -> %pI4 (%04x)",
+ prefix, &t->src_ipv4, t->src_port,
+ &t->dst_ipv4, t->dst_port);
+}
+
+static bool qede_flow_spec_ipv6_cmp(struct qede_arfs_tuple *a,
+ struct qede_arfs_tuple *b)
+{
+ if (a->eth_proto != htons(ETH_P_IPV6) ||
+ b->eth_proto != htons(ETH_P_IPV6))
+ return false;
+
+ if (memcmp(&a->src_ipv6, &b->src_ipv6, sizeof(struct in6_addr)))
+ return false;
+
+ if (memcmp(&a->dst_ipv6, &b->dst_ipv6, sizeof(struct in6_addr)))
+ return false;
+
+ return true;
+}
+
+static void qede_flow_build_ipv6_hdr(struct qede_arfs_tuple *t,
+ void *header)
+{
+ __be16 *ports = (__be16 *)(header + ETH_HLEN + sizeof(struct ipv6hdr));
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)(header + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)header;
+
+ eth->h_proto = t->eth_proto;
+ memcpy(&ip6->saddr, &t->src_ipv6, sizeof(struct in6_addr));
+ memcpy(&ip6->daddr, &t->dst_ipv6, sizeof(struct in6_addr));
+ ip6->version = 0x6;
+
+ if (t->ip_proto == IPPROTO_TCP) {
+ ip6->nexthdr = NEXTHDR_TCP;
+ ip6->payload_len = cpu_to_be16(sizeof(struct tcphdr));
+ } else {
+ ip6->nexthdr = NEXTHDR_UDP;
+ ip6->payload_len = cpu_to_be16(sizeof(struct udphdr));
+ }
+
+ /* ports is weakly typed to suit both TCP and UDP ports */
+ ports[0] = t->src_port;
+ ports[1] = t->dst_port;
+}
+
+/* Reject fields which are set but not supported by the driver */
+static int qede_flow_spec_validate_unused(struct qede_dev *edev,
+ struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ DP_INFO(edev, "Don't support MAC extensions\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.vlan_etype || fs->h_ext.vlan_tci)) {
+ DP_INFO(edev, "Don't support vlan-based classification\n");
+ return -EOPNOTSUPP;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] || fs->h_ext.data[1])) {
+ DP_INFO(edev, "Don't support user defined data\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t)
+{
+	/* The input must be either a full 4-tuple, an L4 destination
+	 * port only, a source IP only or a destination IP only.
+	 */
+ if (t->src_port && t->dst_port && t->src_ipv4 && t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !t->src_ipv4 && !t->dst_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !t->dst_ipv4 && t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ t->dst_ipv4 && !t->src_ipv4) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv4_cmp;
+ t->build_hdr = qede_flow_build_ipv4_hdr;
+ t->stringify = qede_flow_stringify_ipv4_hdr;
+
+ return 0;
+}
+
+static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr)
+{
+	/* The input must be either a full 4-tuple, an L4 destination
+	 * port only, a source IP only or a destination IP only.
+	 */
+ if (t->src_port && t->dst_port &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
+ } else if (!t->src_port && t->dst_port &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_L4_PORT;
+ } else if (!t->src_port && !t->dst_port &&
+ !memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_SRC;
+ } else if (!t->src_port && !t->dst_port &&
+ memcmp(&t->dst_ipv6, zaddr, sizeof(struct in6_addr)) &&
+ !memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
+ t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
+ } else {
+ DP_INFO(edev, "Invalid N-tuple\n");
+ return -EOPNOTSUPP;
+ }
+
+ t->ip_comp = qede_flow_spec_ipv6_cmp;
+ t->build_hdr = qede_flow_build_ipv6_hdr;
+
+ return 0;
+}
+
+/* Must be called while qede lock is held */
+static struct qede_arfs_fltr_node *
+qede_flow_find_fltr(struct qede_dev *edev, struct qede_arfs_tuple *t)
+{
+ struct qede_arfs_fltr_node *fltr;
+ struct hlist_node *temp;
+ struct hlist_head *head;
+
+ head = QEDE_ARFS_BUCKET_HEAD(edev, 0);
+
+ hlist_for_each_entry_safe(fltr, temp, head, node) {
+ if (fltr->tuple.ip_proto == t->ip_proto &&
+ fltr->tuple.src_port == t->src_port &&
+ fltr->tuple.dst_port == t->dst_port &&
+ t->ip_comp(&fltr->tuple, t))
+ return fltr;
+ }
+
+ return NULL;
+}
+
+static void qede_flow_set_destination(struct qede_dev *edev,
+ struct qede_arfs_fltr_node *n,
+ struct ethtool_rx_flow_spec *fs)
+{
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ n->b_is_drop = true;
+ return;
+ }
+
+ n->vfid = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ n->rxq_id = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ n->next_rxq_id = n->rxq_id;
+
+ if (n->vfid)
+ DP_VERBOSE(edev, QED_MSG_SP,
+ "Configuring N-tuple for VF 0x%02x\n", n->vfid - 1);
+}
+
+int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie)
+{
+ struct qede_arfs_fltr_node *fltr = NULL;
+ int rc = -EPERM;
+
+ __qede_lock(edev);
+ if (!edev->arfs)
+ goto unlock;
+
+ fltr = qede_get_arfs_fltr_by_loc(QEDE_ARFS_BUCKET_HEAD(edev, 0),
+ cookie);
+ if (!fltr)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, fltr, fltr->rxq_id, false);
+
+ rc = qede_poll_arfs_filter_config(edev, fltr);
+ if (rc == 0)
+ qede_dequeue_fltr_and_config_searcher(edev, fltr);
+
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
+
+int qede_get_arfs_filter_count(struct qede_dev *edev)
+{
+ int count = 0;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs)
+ goto unlock;
+
+ count = edev->arfs->filter_count;
+
+unlock:
+ __qede_unlock(edev);
+ return count;
+}
+
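+/* Only 'drop' and 'queue' actions are accepted; a queue action must either
+ * target a VF or reference an Rx queue index within the configured RSS range.
+ */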
+static int qede_parse_actions(struct qede_dev *edev,
+ struct flow_action *flow_action,
+ struct netlink_ext_ack *extack)
+{
+ const struct flow_action_entry *act;
+ int i;
+
+ if (!flow_action_has_entries(flow_action)) {
+ DP_NOTICE(edev, "No actions received\n");
+ return -EINVAL;
+ }
+
+ if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(i, act, flow_action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ break;
+ case FLOW_ACTION_QUEUE:
+ if (act->queue.vf)
+ break;
+
+ if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
+ DP_INFO(edev, "Queue out-of-bounds\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
+ (match.key->dst && match.mask->dst != htons(U16_MAX))) {
+			DP_NOTICE(edev, "Do not support port masks\n");
+ return -EINVAL;
+ }
+
+ t->src_port = match.key->src;
+ t->dst_port = match.key->dst;
+ }
+
+ return 0;
+}
+
+static int
+qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
+{
+ struct in6_addr zero_addr, addr;
+
+ memset(&zero_addr, 0, sizeof(addr));
+ memset(&addr, 0xff, sizeof(addr));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if ((memcmp(&match.key->src, &zero_addr, sizeof(addr)) &&
+ memcmp(&match.mask->src, &addr, sizeof(addr))) ||
+ (memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
+ memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
+ DP_NOTICE(edev,
+ "Do not support IPv6 address prefix/mask\n");
+ return -EINVAL;
+ }
+
+ memcpy(&t->src_ipv6, &match.key->src, sizeof(addr));
+ memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
+ }
+
+ if (qede_flow_parse_ports(edev, rule, t))
+ return -EINVAL;
+
+ return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+}
+
+static int
+qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *t)
+{
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
+ (match.key->dst && match.mask->dst != htonl(U32_MAX))) {
+ DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ return -EINVAL;
+ }
+
+ t->src_ipv4 = match.key->src;
+ t->dst_ipv4 = match.key->dst;
+ }
+
+ if (qede_flow_parse_ports(edev, rule, t))
+ return -EINVAL;
+
+ return qede_set_v4_tuple_to_profile(edev, t);
+}
+
+static int
+qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_flow_parse_v6_common(edev, rule, tuple);
+}
+
+static int
+qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_TCP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_flow_parse_v4_common(edev, rule, tuple);
+}
+
+static int
+qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IPV6);
+
+ return qede_flow_parse_v6_common(edev, rule, tuple);
+}
+
+static int
+qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple)
+{
+ tuple->ip_proto = IPPROTO_UDP;
+ tuple->eth_proto = htons(ETH_P_IP);
+
+ return qede_flow_parse_v4_common(edev, rule, tuple);
+}
+
+static int
+qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
+ struct flow_rule *rule, struct qede_arfs_tuple *tuple)
+{
+ struct flow_dissector *dissector = rule->match.dissector;
+ int rc = -EINVAL;
+ u8 ip_proto = 0;
+
+ memset(tuple, 0, sizeof(*tuple));
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ DP_NOTICE(edev, "Unsupported key set:0x%x\n",
+ dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (proto != htons(ETH_P_IP) &&
+ proto != htons(ETH_P_IPV6)) {
+ DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ return -EPROTONOSUPPORT;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ ip_proto = match.key->ip_proto;
+ }
+
+ if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
+ rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
+ else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
+ rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
+ rc = qede_flow_parse_udp_v4(edev, rule, tuple);
+ else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
+ rc = qede_flow_parse_udp_v6(edev, rule, tuple);
+ else
+ DP_NOTICE(edev, "Invalid protocol request\n");
+
+ return rc;
+}
+
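+/* Entry point for tc-flower offload, e.g. (interface name is illustrative):
+ *   tc filter add dev eth0 ingress protocol ip flower ip_proto tcp \
+ *       dst_port 80 action drop
+ */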
+int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
+ struct flow_cls_offload *f)
+{
+ struct qede_arfs_fltr_node *n;
+ int min_hlen, rc = -EINVAL;
+ struct qede_arfs_tuple t;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+ /* parse flower attribute and prepare filter */
+ if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+ goto unlock;
+
+ /* Validate profile mode and number of filters */
+ if ((edev->arfs->filter_count && edev->arfs->mode != t.mode) ||
+ edev->arfs->filter_count == QEDE_RFS_MAX_FLTR) {
+ DP_NOTICE(edev,
+ "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
+ t.mode, edev->arfs->mode, edev->arfs->filter_count);
+ goto unlock;
+ }
+
+ /* parse tc actions and get the vf_id */
+ if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EEXIST;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ n->buf_len = min_hlen;
+ n->b_is_drop = true;
+ n->sw_id = f->cookie;
+
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+
+unlock:
+ __qede_unlock(edev);
+ return rc;
+}
+
+static int qede_flow_spec_validate(struct qede_dev *edev,
+ struct flow_action *flow_action,
+ struct qede_arfs_tuple *t,
+ __u32 location)
+{
+ if (location >= QEDE_RFS_MAX_FLTR) {
+ DP_INFO(edev, "Location out-of-bounds\n");
+ return -EINVAL;
+ }
+
+ /* Check location isn't already in use */
+ if (test_bit(location, edev->arfs->arfs_fltr_bmap)) {
+ DP_INFO(edev, "Location already in use\n");
+ return -EINVAL;
+ }
+
+ /* Check if the filtering-mode could support the filter */
+ if (edev->arfs->filter_count &&
+ edev->arfs->mode != t->mode) {
+ DP_INFO(edev,
+ "flow_spec would require filtering mode %08x, but %08x is configured\n",
+			t->mode, edev->arfs->mode);
+ return -EINVAL;
+ }
+
+ if (qede_parse_actions(edev, flow_action, NULL))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qede_flow_spec_to_rule(struct qede_dev *edev,
+ struct qede_arfs_tuple *t,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_rx_flow_spec_input input = {};
+ struct ethtool_rx_flow_rule *flow;
+ __be16 proto;
+ int err = 0;
+
+ if (qede_flow_spec_validate_unused(edev, fs))
+ return -EOPNOTSUPP;
+
+ switch ((fs->flow_type & ~FLOW_EXT)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ proto = htons(ETH_P_IP);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ proto = htons(ETH_P_IPV6);
+ break;
+ default:
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Can't support flow of type %08x\n", fs->flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ input.fs = fs;
+ flow = ethtool_rx_flow_rule_create(&input);
+ if (IS_ERR(flow))
+ return PTR_ERR(flow);
+
+ if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ /* Make sure location is valid and filter isn't already set */
+ err = qede_flow_spec_validate(edev, &flow->rule->action, t,
+ fs->location);
+err_out:
+ ethtool_rx_flow_rule_destroy(flow);
+ return err;
+}
+
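+/* Entry point for ethtool n-tuple rules, e.g. (names/values are illustrative):
+ *   ethtool -N eth0 flow-type tcp4 dst-port 5001 action 2 loc 0
+ */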
+int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct ethtool_rx_flow_spec *fsp = &info->fs;
+ struct qede_arfs_fltr_node *n;
+ struct qede_arfs_tuple t;
+ int min_hlen, rc;
+
+ __qede_lock(edev);
+
+ if (!edev->arfs) {
+ rc = -EPERM;
+ goto unlock;
+ }
+
+	/* Translate the flow specification into something fitting our DB */
+ rc = qede_flow_spec_to_rule(edev, &t, fsp);
+ if (rc)
+ goto unlock;
+
+ if (qede_flow_find_fltr(edev, &t)) {
+ rc = -EINVAL;
+ goto unlock;
+ }
+
+ n = kzalloc(sizeof(*n), GFP_KERNEL);
+ if (!n) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ min_hlen = qede_flow_get_min_header_size(&t);
+ n->data = kzalloc(min_hlen, GFP_KERNEL);
+ if (!n->data) {
+ kfree(n);
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ n->sw_id = fsp->location;
+ set_bit(n->sw_id, edev->arfs->arfs_fltr_bmap);
+ n->buf_len = min_hlen;
+
+ memcpy(&n->tuple, &t, sizeof(n->tuple));
+
+ qede_flow_set_destination(edev, n, fsp);
+
+ /* Build a minimal header according to the flow */
+ n->tuple.build_hdr(&n->tuple, n->data);
+
+ rc = qede_enqueue_fltr_and_config_searcher(edev, n, 0);
+ if (rc)
+ goto unlock;
+
+ qede_configure_arfs_fltr(edev, n, n->rxq_id, true);
+ rc = qede_poll_arfs_filter_config(edev, n);
+unlock:
+ __qede_unlock(edev);
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
new file mode 100644
index 000000000..cb1746bc0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -0,0 +1,1805 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
+#include <net/udp_tunnel.h>
+#include <linux/ip.h>
+#include <net/gro.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/ip6_checksum.h>
+#include "qede_ptp.h"
+
+#include <linux/qed/qed_if.h>
+#include "qede.h"
+/*********************************
+ * Content also used by slowpath *
+ *********************************/
+
+int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
+{
+ struct sw_rx_data *sw_rx_data;
+ struct eth_rx_bd *rx_bd;
+ dma_addr_t mapping;
+ struct page *data;
+
+ /* In case lazy-allocation is allowed, postpone allocation until the
+ * end of the NAPI run. We'd still need to make sure the Rx ring has
+ * sufficient buffers to guarantee an additional Rx interrupt.
+ */
+ if (allow_lazy && likely(rxq->filled_buffers > 12)) {
+ rxq->filled_buffers--;
+ return 0;
+ }
+
+ data = alloc_pages(GFP_ATOMIC, 0);
+ if (unlikely(!data))
+ return -ENOMEM;
+
+ /* Map the entire page as it would be used
+ * for multiple RX buffer segment size mapping.
+ */
+ mapping = dma_map_page(rxq->dev, data, 0,
+ PAGE_SIZE, rxq->data_direction);
+ if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
+ __free_page(data);
+ return -ENOMEM;
+ }
+
+ sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ sw_rx_data->page_offset = 0;
+ sw_rx_data->data = data;
+ sw_rx_data->mapping = mapping;
+
+ /* Advance PROD and get BD pointer */
+ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
+ WARN_ON(!rx_bd);
+ rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
+ rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
+ rxq->rx_headroom);
+
+ rxq->sw_rx_prod++;
+ rxq->filled_buffers++;
+
+ return 0;
+}
+
+/* Unmap the data and free skb */
+int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
+{
+ u16 idx = txq->sw_tx_cons;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_bd *tx_data_bd;
+ int bds_consumed = 0;
+ int nbds;
+ bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
+ int i, split_bd_len = 0;
+
+ if (unlikely(!skb)) {
+ DP_ERR(edev,
+ "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
+ idx, txq->sw_tx_cons, txq->sw_tx_prod);
+ return -1;
+ }
+
+ *len = skb->len;
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+
+ bds_consumed++;
+
+ nbds = first_bd->data.nbds;
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ bds_consumed++;
+ }
+ dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ while (bds_consumed++ < nbds)
+ qed_chain_consume(&txq->tx_pbl);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
+
+ return 0;
+}
+
+/* Unmap the data and free skb when mapping failed during start_xmit */
+static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
+ struct eth_tx_1st_bd *first_bd,
+ int nbd, bool data_split)
+{
+ u16 idx = txq->sw_tx_prod;
+ struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
+ struct eth_tx_bd *tx_data_bd;
+ int i, split_bd_len = 0;
+
+ /* Return prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
+
+ if (data_split) {
+ struct eth_tx_bd *split = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ split_bd_len = BD_UNMAP_LEN(split);
+ nbd--;
+ }
+
+ dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
+
+ /* Unmap the data of the skb frags */
+ for (i = 0; i < nbd; i++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ if (tx_data_bd->nbytes)
+ dma_unmap_page(txq->dev,
+ BD_UNMAP_ADDR(tx_data_bd),
+ BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+ }
+
+ /* Return again prod to its position before this skb was handled */
+ qed_chain_set_prod(&txq->tx_pbl,
+ le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
+
+ /* Free skb */
+ dev_kfree_skb_any(skb);
+ txq->sw_tx_ring.skbs[idx].skb = NULL;
+ txq->sw_tx_ring.skbs[idx].flags = 0;
+}
+
+static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
+{
+ u32 rc = XMIT_L4_CSUM;
+ __be16 l3_proto;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return XMIT_PLAIN;
+
+ l3_proto = vlan_get_protocol(skb);
+ if (l3_proto == htons(ETH_P_IPV6) &&
+ (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+ *ipv6_ext = 1;
+
+ if (skb->encapsulation) {
+ rc |= XMIT_ENC;
+ if (skb_is_gso(skb)) {
+ unsigned short gso_type = skb_shinfo(skb)->gso_type;
+
+ if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
+ (gso_type & SKB_GSO_GRE_CSUM))
+ rc |= XMIT_ENC_GSO_L4_CSUM;
+
+ rc |= XMIT_LSO;
+ return rc;
+ }
+ }
+
+ if (skb_is_gso(skb))
+ rc |= XMIT_LSO;
+
+ return rc;
+}
+
+static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
+ struct eth_tx_2nd_bd *second_bd,
+ struct eth_tx_3rd_bd *third_bd)
+{
+ u8 l4_proto;
+ u16 bd2_bits1 = 0, bd2_bits2 = 0;
+
+ bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
+
+ bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
+ ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
+ << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
+
+ bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
+ ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
+
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ else
+ l4_proto = ip_hdr(skb)->protocol;
+
+ if (l4_proto == IPPROTO_UDP)
+ bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
+
+ if (third_bd)
+ third_bd->data.bitfields |=
+ cpu_to_le16(((tcp_hdrlen(skb) / 4) &
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
+ ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
+
+ second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
+ second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
+}
+
+static int map_frag_to_bd(struct qede_tx_queue *txq,
+ skb_frag_t *frag, struct eth_tx_bd *bd)
+{
+ dma_addr_t mapping;
+
+ /* Map skb non-linear frag data for DMA */
+ mapping = skb_frag_dma_map(txq->dev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(txq->dev, mapping)))
+ return -ENOMEM;
+
+ /* Setup the data pointer of the frag data */
+ BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
+
+ return 0;
+}
+
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return skb_inner_tcp_all_headers(skb);
+
+ return skb_tcp_all_headers(skb);
+}
+
+/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
+{
+ int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
+
+ if (xmit_type & XMIT_LSO) {
+ int hlen;
+
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
+
+ /* linear payload would require its own BD */
+ if (skb_headlen(skb) > hlen)
+ allowed_frags--;
+ }
+
+ return (skb_shinfo(skb)->nr_frags > allowed_frags);
+}
+#endif
+
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+	/* The wmb makes sure that the BD data is updated before updating the
+	 * producer; otherwise the FW may read old data from the BDs.
+	 */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* Fence required to flush the write-combined buffer, since another
+	 * CPU may write to the same doorbell address and data may be lost
+	 * due to the relaxed-ordering nature of the write-combined BAR.
+	 */
+ wmb();
+}
+
+static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
+ u16 len, struct page *page, struct xdp_frame *xdpf)
+{
+ struct eth_tx_1st_bd *bd;
+ struct sw_tx_xdp *xdp;
+ u16 val;
+
+ if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
+ txq->num_tx_buffers)) {
+ txq->stopped_cnt++;
+ return -ENOMEM;
+ }
+
+ bd = qed_chain_produce(&txq->tx_pbl);
+ bd->data.nbds = 1;
+ bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
+
+ val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+ bd->data.bitfields = cpu_to_le16(val);
+
+ /* We can safely ignore the offset, as it's 0 for XDP */
+ BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);
+
+ xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
+ xdp->mapping = dma;
+ xdp->page = page;
+ xdp->xdpf = xdpf;
+
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
+
+ return 0;
+}
+
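+/* .ndo_xdp_xmit: frames are spread across the XDP Tx queues based on the
+ * sending CPU id; returns the number of frames accepted for transmission.
+ */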
+int qede_xdp_transmit(struct net_device *dev, int n_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct device *dmadev = &edev->pdev->dev;
+ struct qede_tx_queue *xdp_tx;
+ struct xdp_frame *xdpf;
+ dma_addr_t mapping;
+ int i, nxmit = 0;
+ u16 xdp_prod;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ if (unlikely(!netif_running(dev)))
+ return -ENETDOWN;
+
+ i = smp_processor_id() % edev->total_xdp_queues;
+ xdp_tx = edev->fp_array[i].xdp_tx;
+
+ spin_lock(&xdp_tx->xdp_tx_lock);
+
+ for (i = 0; i < n_frames; i++) {
+ xdpf = frames[i];
+
+ mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dmadev, mapping)))
+ break;
+
+ if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
+ NULL, xdpf)))
+ break;
+ nxmit++;
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);
+
+ xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(xdp_tx);
+ }
+
+ spin_unlock(&xdp_tx->xdp_tx_lock);
+
+ return nxmit;
+}
+
+int qede_txq_has_work(struct qede_tx_queue *txq)
+{
+ u16 hw_bd_cons;
+
+ /* Tell compiler that consumer and producer can change */
+ barrier();
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
+ return 0;
+
+ return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
+}
+
+static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
+ struct device *dev = &edev->pdev->dev;
+ struct xdp_frame *xdpf;
+ u16 hw_bd_cons;
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ xdp_info = xdp_arr + txq->sw_tx_cons;
+ xdpf = xdp_info->xdpf;
+
+ if (xdpf) {
+ dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
+ DMA_TO_DEVICE);
+ xdp_return_frame(xdpf);
+
+ xdp_info->xdpf = NULL;
+ } else {
+ dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(xdp_info->page);
+ }
+
+ qed_chain_consume(&txq->tx_pbl);
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
+ txq->xmit_pkts++;
+ }
+}
+
+static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct netdev_queue *netdev_txq;
+ u16 hw_bd_cons;
+ int rc;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
+
+ hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
+ barrier();
+
+ while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
+ int len = 0;
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
+ hw_bd_cons,
+ qed_chain_get_cons_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
+ txq->xmit_pkts++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ * On the other hand we need an rmb() here to ensure the proper
+ * ordering of bit testing in the following
+ * netif_tx_queue_stopped(txq) call.
+ */
+ smp_mb();
+
+ if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
+		/* Taking tx_lock is needed to prevent re-enabling the queue
+		 * while it's empty. This could happen if rx_action() gets
+ * suspended in qede_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
+ */
+
+ __netif_tx_lock(netdev_txq, smp_processor_id());
+
+ if ((netif_tx_queue_stopped(netdev_txq)) &&
+ (edev->state == QEDE_STATE_OPEN) &&
+ (qed_chain_get_elem_left(&txq->tx_pbl)
+ >= (MAX_SKB_FRAGS + 1))) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
+ "Wake queue was called\n");
+ }
+
+ __netif_tx_unlock(netdev_txq);
+ }
+
+ return 0;
+}
+
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
+{
+ u16 hw_comp_cons, sw_comp_cons;
+
+ /* Tell compiler that status block fields can change */
+ barrier();
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ return hw_comp_cons != sw_comp_cons;
+}
+
+static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
+{
+ qed_chain_consume(&rxq->rx_bd_ring);
+ rxq->sw_rx_cons++;
+}
+
+/* This function reuses the buffer (from an offset) from the
+ * consumer index to the producer index in the BD ring.
+ */
+static inline void qede_reuse_page(struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
+ struct sw_rx_data *curr_prod;
+ dma_addr_t new_mapping;
+
+ curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
+ *curr_prod = *curr_cons;
+
+ new_mapping = curr_prod->mapping + curr_prod->page_offset;
+
+ rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
+ rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
+ rxq->rx_headroom);
+
+ rxq->sw_rx_prod++;
+ curr_cons->data = NULL;
+}
+
+/* In case of allocation failures reuse buffers
+ * from consumer index to produce buffers for firmware
+ */
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
+{
+ struct sw_rx_data *curr_cons;
+
+ for (; count > 0; count--) {
+ curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ qede_reuse_page(rxq, curr_cons);
+ qede_rx_bd_ring_consume(rxq);
+ }
+}
+
+static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
+ struct sw_rx_data *curr_cons)
+{
+ /* Move to the next segment in the page */
+ curr_cons->page_offset += rxq->rx_buf_seg_size;
+
+ if (curr_cons->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+			/* Since we failed to allocate a new buffer, the
+			 * current buffer can be used again.
+			 */
+ curr_cons->page_offset -= rxq->rx_buf_seg_size;
+
+ return -ENOMEM;
+ }
+
+ dma_unmap_page(rxq->dev, curr_cons->mapping,
+ PAGE_SIZE, rxq->data_direction);
+ } else {
+		/* Increment the refcount of the page as we don't want the
+		 * network stack to take ownership of a page which can be
+		 * recycled multiple times by the driver.
+		 */
+ page_ref_inc(curr_cons->data);
+ qede_reuse_page(rxq, curr_cons);
+ }
+
+ return 0;
+}
+
+void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+ u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
+ u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
+ struct eth_rx_prod_data rx_prods = {0};
+
+ /* Update producers */
+ rx_prods.bd_prod = cpu_to_le16(bd_prod);
+ rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
+
+ /* Make sure that the BD and SGE data is updated before updating the
+ * producers since FW might read the BD/SGE right after the producer
+ * is updated.
+ */
+ wmb();
+
+ internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
+ (u32 *)&rx_prods);
+}
+
+static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
+{
+ enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
+ enum rss_hash_type htype;
+ u32 hash = 0;
+
+ htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
+ if (htype) {
+ hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
+ (htype == RSS_HASH_TYPE_IPV6)) ?
+ PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
+ hash = le32_to_cpu(rss_hash);
+ }
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
+{
+ skb_checksum_none_assert(skb);
+
+ if (csum_flag & QEDE_CSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
+ skb->csum_level = 1;
+ skb->encapsulation = 1;
+ }
+}
+
+static inline void qede_skb_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ if (vlan_tag)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&fp->napi, skb);
+}
+
+static void qede_set_gro_params(struct qede_dev *edev,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
+
+ if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
+ PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+
+ skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
+ cqe->header_len;
+}
+
+static int qede_fill_frag_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ u8 tpa_agg_index, u16 len_on_bd)
+{
+ struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
+ NUM_RX_BDS_MAX];
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
+ struct sk_buff *skb = tpa_info->skb;
+
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+ goto out;
+
+ /* Add one frag and update the appropriate fields in the skb */
+ skb_fill_page_desc(skb, tpa_info->frag_id++,
+ current_bd->data,
+ current_bd->page_offset + rxq->rx_headroom,
+ len_on_bd);
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
+ /* Incr page ref count to reuse on allocation failure
+ * so that it doesn't get freed while freeing SKB.
+ */
+ page_ref_inc(current_bd->data);
+ goto out;
+ }
+
+ qede_rx_bd_ring_consume(rxq);
+
+ skb->data_len += len_on_bd;
+ skb->truesize += rxq->rx_buf_seg_size;
+ skb->len += len_on_bd;
+
+ return 0;
+
+out:
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ qede_recycle_rx_bd_ring(rxq, 1);
+
+ return -ENOMEM;
+}
+
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static inline struct sk_buff *
+qede_build_skb(struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad)
+{
+ struct sk_buff *skb;
+ void *buf;
+
+ buf = page_address(bd->data) + bd->page_offset;
+ skb = build_skb(buf, rxq->rx_buf_seg_size);
+
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, pad);
+ skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *
+qede_tpa_rx_build_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad,
+ bool alloc_skb)
+{
+ struct sk_buff *skb;
+
+ skb = qede_build_skb(rxq, bd, len, pad);
+ bd->page_offset += rxq->rx_buf_seg_size;
+
+ if (bd->page_offset == PAGE_SIZE) {
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ DP_NOTICE(edev,
+ "Failed to allocate RX buffer for tpa start\n");
+ bd->page_offset -= rxq->rx_buf_seg_size;
+ page_ref_inc(bd->data);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+ } else {
+ page_ref_inc(bd->data);
+ qede_reuse_page(rxq, bd);
+ }
+
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+
+ return skb;
+}
+
+static struct sk_buff *
+qede_rx_build_skb(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sw_rx_data *bd, u16 len, u16 pad)
+{
+ struct sk_buff *skb = NULL;
+
+	/* For smaller frames we still need to allocate an skb and memcpy
+	 * the data, benefiting from reusing the page segment instead of
+	 * un-mapping it.
+	 */
+ if ((len + pad <= edev->rx_copybreak)) {
+ unsigned int offset = bd->page_offset + pad;
+
+ skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, pad);
+ skb_put_data(skb, page_address(bd->data) + offset, len);
+ qede_reuse_page(rxq, bd);
+ goto out;
+ }
+
+ skb = qede_build_skb(rxq, bd, len, pad);
+
+ if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
+ /* Incr page ref count to reuse on allocation failure so
+		 * that it doesn't get freed while freeing the SKB [as it's
+ * already mapped there].
+ */
+ page_ref_inc(bd->data);
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+out:
+ /* We've consumed the first BD and prepared an SKB */
+ qede_rx_bd_ring_consume(rxq);
+
+ return skb;
+}
+
+static void qede_tpa_start(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_start_cqe *cqe)
+{
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ struct sw_rx_data *sw_rx_data_cons;
+ u16 pad;
+
+ sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
+ pad = cqe->placement_offset + rxq->rx_headroom;
+
+ tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
+ le16_to_cpu(cqe->len_on_first_bd),
+ pad, false);
+ tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
+ tpa_info->buffer.mapping = sw_rx_data_cons->mapping;
+
+ if (unlikely(!tpa_info->skb)) {
+ DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
+
+		/* Consume from the ring but do not produce, since the
+		 * buffer might still be used by the FW; it will be re-used
+		 * at TPA end.
+		 */
+ tpa_info->tpa_start_fail = true;
+ qede_rx_bd_ring_consume(rxq);
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ goto cons_buf;
+ }
+
+ tpa_info->frag_id = 0;
+ tpa_info->state = QEDE_AGG_STATE_START;
+
+ if ((le16_to_cpu(cqe->pars_flags.flags) >>
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
+ PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
+ tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+ else
+ tpa_info->vlan_tag = 0;
+
+ qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
+
+ /* This is needed in order to enable forwarding support */
+ qede_set_gro_params(edev, tpa_info->skb, cqe);
+
+cons_buf: /* We still need to handle bd_len_list to consume buffers */
+ if (likely(cqe->bw_ext_bd_len_list[0]))
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
+
+ if (unlikely(cqe->bw_ext_bd_len_list[1])) {
+ DP_ERR(edev,
+ "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
+ tpa_info->state = QEDE_AGG_STATE_ERROR;
+ }
+}
+
+#ifdef CONFIG_INET
+static void qede_gro_ip_csum(struct sk_buff *skb)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct iphdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
+ iph->saddr, iph->daddr, 0);
+
+ tcp_gro_complete(skb);
+}
+
+static void qede_gro_ipv6_csum(struct sk_buff *skb)
+{
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ struct tcphdr *th;
+
+ skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ th = tcp_hdr(skb);
+
+ th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
+ &iph->saddr, &iph->daddr, 0);
+ tcp_gro_complete(skb);
+}
+#endif
+
+static void qede_gro_receive(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct sk_buff *skb,
+ u16 vlan_tag)
+{
+	/* The FW can send a single MTU-sized packet from the GRO flow
+	 * due to aggregation timeout, last segment, etc., which is not
+	 * expected to be a GRO packet. If the skb has zero frags then
+	 * simply push it up the stack as a non-GSO skb.
+	 */
+ if (unlikely(!skb->data_len)) {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ goto send_skb;
+ }
+
+#ifdef CONFIG_INET
+ if (skb_shinfo(skb)->gso_size) {
+ skb_reset_network_header(skb);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ qede_gro_ip_csum(skb);
+ break;
+ case htons(ETH_P_IPV6):
+ qede_gro_ipv6_csum(skb);
+ break;
+ default:
+ DP_ERR(edev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
+ ntohs(skb->protocol));
+ }
+ }
+#endif
+
+send_skb:
+ skb_record_rx_queue(skb, fp->rxq->rxq_id);
+ qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
+}
+
+static inline void qede_tpa_cont(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ int i;
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+ "Strange - TPA cont with more than a single len_list entry\n");
+}
+
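+/* TPA end: attach any remaining buffers, finalize the aggregated SKB and hand
+ * it to GRO; returns 1 if an SKB was passed up the stack, 0 otherwise.
+ */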
+static int qede_tpa_end(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_agg_info *tpa_info;
+ struct sk_buff *skb;
+ int i;
+
+ tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
+ skb = tpa_info->skb;
+
+ if (tpa_info->buffer.page_offset == PAGE_SIZE)
+ dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
+ PAGE_SIZE, rxq->data_direction);
+
+ for (i = 0; cqe->len_list[i]; i++)
+ qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
+ le16_to_cpu(cqe->len_list[i]));
+ if (unlikely(i > 1))
+ DP_ERR(edev,
+			"Strange - TPA end with more than a single len_list entry\n");
+
+ if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
+ goto err;
+
+ /* Sanity */
+ if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
+ DP_ERR(edev,
+ "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
+ cqe->num_of_bds, tpa_info->frag_id);
+ if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
+ DP_ERR(edev,
+ "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
+ le16_to_cpu(cqe->total_packet_len), skb->len);
+
+ /* Finalize the SKB */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
+
+ qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
+
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ return 1;
+err:
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+
+ if (tpa_info->tpa_start_fail) {
+ qede_reuse_page(rxq, &tpa_info->buffer);
+ tpa_info->tpa_start_fail = false;
+ }
+
+ dev_kfree_skb_any(tpa_info->skb);
+ tpa_info->skb = NULL;
+ return 0;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 csum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ csum = QEDE_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return csum;
+}
+
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
+/* Return true iff packet is to be passed to stack */
+static bool qede_rx_xdp(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ struct bpf_prog *prog,
+ struct sw_rx_data *bd,
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 *data_offset, u16 *len)
+{
+ struct xdp_buff xdp;
+ enum xdp_action act;
+
+ xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset,
+ *len, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ /* Recalculate, as XDP might have changed the headers */
+ *data_offset = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
+
+ if (act == XDP_PASS)
+ return true;
+
+ /* Count number of packets not to be passed to stack */
+ rxq->xdp_no_pass++;
+
+ switch (act) {
+ case XDP_TX:
+ /* We need the replacement buffer before transmit. */
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+
+ trace_xdp_exception(edev->ndev, prog, act);
+ break;
+ }
+
+ /* Now if there's a transmission problem, we'd still have to
+ * throw away the current buffer, as the replacement was already
+ * allocated.
+ */
+ if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
+ *data_offset, *len, bd->data,
+ NULL))) {
+ dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+ rxq->data_direction);
+ __free_page(bd->data);
+
+ trace_xdp_exception(edev->ndev, prog, act);
+ } else {
+ dma_sync_single_for_device(rxq->dev,
+ bd->mapping + *data_offset,
+ *len, rxq->data_direction);
+ fp->xdp_xmit |= QEDE_XDP_TX;
+ }
+
+ /* Regardless, we've consumed an Rx BD */
+ qede_rx_bd_ring_consume(rxq);
+ break;
+ case XDP_REDIRECT:
+ /* We need the replacement buffer before redirecting. */
+ if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
+ qede_recycle_rx_bd_ring(rxq, 1);
+
+ trace_xdp_exception(edev->ndev, prog, act);
+ break;
+ }
+
+ dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
+ rxq->data_direction);
+
+ if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
+ DP_NOTICE(edev, "Failed to redirect the packet\n");
+ else
+ fp->xdp_xmit |= QEDE_XDP_REDIRECT;
+
+ qede_rx_bd_ring_consume(rxq);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(edev->ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(edev->ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
+ }
+
+ return false;
+}
+
+static int qede_rx_build_jumbo(struct qede_dev *edev,
+ struct qede_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 first_bd_len)
+{
+ u16 pkt_len = le16_to_cpu(cqe->pkt_len);
+ struct sw_rx_data *bd;
+ u16 bd_cons_idx;
+ u8 num_frags;
+
+ pkt_len -= first_bd_len;
+
+ /* We've already used one BD for the SKB. Now take care of the rest */
+ for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
+ u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
+ pkt_len;
+
+ if (unlikely(!cur_size)) {
+ DP_ERR(edev,
+ "Still got %d BDs for mapping jumbo, but length became 0\n",
+ num_frags);
+ goto out;
+ }
+
+ /* We need a replacement buffer for each BD */
+ if (unlikely(qede_alloc_rx_buffer(rxq, true)))
+ goto out;
+
+ /* Now that we've allocated the replacement buffer,
+ * we can safely consume the next BD and map it to the SKB.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+ qede_rx_bd_ring_consume(rxq);
+
+ dma_unmap_page(rxq->dev, bd->mapping,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data,
+ rxq->rx_headroom, cur_size, PAGE_SIZE);
+
+ pkt_len -= cur_size;
+ }
+
+ if (unlikely(pkt_len))
+ DP_ERR(edev,
+ "Mapped all BDs of jumbo, but still have %d bytes\n",
+ pkt_len);
+
+out:
+ return num_frags;
+}
+
+static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq,
+ union eth_rx_cqe *cqe,
+ enum eth_rx_cqe_type type)
+{
+ switch (type) {
+ case ETH_RX_CQE_TYPE_TPA_START:
+ qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
+ return 0;
+ case ETH_RX_CQE_TYPE_TPA_END:
+ return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
+ default:
+ return 0;
+ }
+}
+
+static int qede_rx_process_cqe(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_rx_queue *rxq)
+{
+ struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ u16 len, pad, bd_cons_idx, parse_flag;
+ enum eth_rx_cqe_type cqe_type;
+ union eth_rx_cqe *cqe;
+ struct sw_rx_data *bd;
+ struct sk_buff *skb;
+ __le16 flags;
+ u8 csum_flag;
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+
+ /* Process an unlikely slowpath event */
+ if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
+ struct eth_slow_path_rx_cqe *sp_cqe;
+
+ sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
+ edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
+ return 0;
+ }
+
+ /* Handle TPA cqes */
+ if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
+ return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
+
+ /* Get the data from the SW ring; consume it only after it's evident
+ * we won't recycle it.
+ */
+ bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ bd = &rxq->sw_rx_ring[bd_cons_idx];
+
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ pad = fp_cqe->placement_offset + rxq->rx_headroom;
+
+ /* Run eBPF program if one is attached */
+ if (xdp_prog)
+ if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
+ &pad, &len))
+ return 0;
+
+ /* If this is an error packet then drop it */
+ flags = cqe->fast_path_regular.pars_flags.flags;
+ parse_flag = le16_to_cpu(flags);
+
+ csum_flag = qede_check_csum(parse_flag);
+ if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
+ rxq->rx_ip_frags++;
+ else
+ rxq->rx_hw_errors++;
+ }
+
+ /* Basic validation passed; we need to prepare an SKB. This also
+ * guarantees that the first BD is finally consumed upon success.
+ */
+ skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
+ if (!skb) {
+ rxq->rx_alloc_errors++;
+ qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
+ return 0;
+ }
+
+ /* In case of a jumbo packet, several PAGE_SIZEd buffers will be
+ * pointed to by a single CQE.
+ */
+ if (fp_cqe->bd_num > 1) {
+ u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
+ fp_cqe, len);
+
+ if (unlikely(unmapped_frags > 0)) {
+ qede_recycle_rx_bd_ring(rxq, unmapped_frags);
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ }
+
+ /* The SKB contains all the data. Now prepare meta-magic */
+ skb->protocol = eth_type_trans(skb, edev->ndev);
+ qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
+ qede_set_skb_csum(skb, csum_flag);
+ skb_record_rx_queue(skb, rxq->rxq_id);
+ qede_ptp_record_rx_ts(edev, cqe, skb);
+
+ /* SKB is prepared - pass it to stack */
+ qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
+
+ return 1;
+}
+
+static int qede_rx_int(struct qede_fastpath *fp, int budget)
+{
+ struct qede_rx_queue *rxq = fp->rxq;
+ struct qede_dev *edev = fp->edev;
+ int work_done = 0, rcv_pkts = 0;
+ u16 hw_comp_cons, sw_comp_cons;
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from doing speculative reads of
+ * CQE/BD in the while-loop before reading hw_comp_cons. Without it, if
+ * the CQE is speculatively read before FW writes it, and FW then writes
+ * the CQE and SB before the CPU reads hw_comp_cons, an old CQE would be
+ * used.
+ */
+ rmb();
+
+ /* Loop to complete all indicated BDs */
+ while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
+ rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
+ qed_chain_recycle_consumed(&rxq->rx_comp_ring);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+ work_done++;
+ }
+
+ rxq->rcv_pkts += rcv_pkts;
+
+ /* Allocate replacement buffers */
+ while (rxq->num_rx_buffers - rxq->filled_buffers)
+ if (qede_alloc_rx_buffer(rxq, false))
+ break;
+
+ /* Update producers */
+ qede_update_rx_prod(edev, rxq);
+
+ return work_done;
+}
+
+static bool qede_poll_is_more_work(struct qede_fastpath *fp)
+{
+ qed_sb_update_sb_idx(fp->sb_info);
+
+ /* *_has_*_work() reads the status block, thus we need to ensure that
+ * the status block indices have actually been read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work), so that we won't write the
+ * "newer" value of the status block to HW (if there was a DMA right
+ * after qede_has_rx_work and there were no rmb, the memory read
+ * (qed_sb_update_sb_idx) could be postponed to right before *_ack_sb).
+ * In that case there would never be another interrupt until there is
+ * another update of the status block, even though there is still
+ * unhandled work.
+ */
+ rmb();
+
+ if (likely(fp->type & QEDE_FASTPATH_RX))
+ if (qede_has_rx_work(fp->rxq))
+ return true;
+
+ if (fp->type & QEDE_FASTPATH_XDP)
+ if (qede_txq_has_work(fp->xdp_tx))
+ return true;
+
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*********************
+ * NDO & API related *
+ *********************/
+int qede_poll(struct napi_struct *napi, int budget)
+{
+ struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
+ napi);
+ struct qede_dev *edev = fp->edev;
+ int rx_work_done = 0;
+ u16 xdp_prod;
+
+ fp->xdp_xmit = 0;
+
+ if (likely(fp->type & QEDE_FASTPATH_TX)) {
+ int cos;
+
+ for_each_cos_in_txq(fp->edev, cos) {
+ if (qede_txq_has_work(&fp->txq[cos]))
+ qede_tx_int(edev, &fp->txq[cos]);
+ }
+ }
+
+ if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
+ qede_xdp_tx_int(edev, fp->xdp_tx);
+
+ rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
+ qede_has_rx_work(fp->rxq)) ?
+ qede_rx_int(fp, budget) : 0;
+
+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush();
+
+ /* Handle case where we are called by netpoll with a budget of 0 */
+ if (rx_work_done < budget || !budget) {
+ if (!qede_poll_is_more_work(fp)) {
+ napi_complete_done(napi, rx_work_done);
+
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
+ } else {
+ rx_work_done = budget;
+ }
+ }
+
+ if (fp->xdp_xmit & QEDE_XDP_TX) {
+ xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
+
+ fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
+ qede_update_tx_producer(fp->xdp_tx);
+ }
+
+ return rx_work_done;
+}
+
+irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
+{
+ struct qede_fastpath *fp = fp_cookie;
+
+ qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
+
+ napi_schedule_irqoff(&fp->napi);
+ return IRQ_HANDLED;
+}
+
+/* Main transmit function */
+netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ struct netdev_queue *netdev_txq;
+ struct qede_tx_queue *txq;
+ struct eth_tx_1st_bd *first_bd;
+ struct eth_tx_2nd_bd *second_bd = NULL;
+ struct eth_tx_3rd_bd *third_bd = NULL;
+ struct eth_tx_bd *tx_data_bd = NULL;
+ u16 txq_index, val = 0;
+ u8 nbd = 0;
+ dma_addr_t mapping;
+ int rc, frag_idx = 0, ipv6_ext = 0;
+ u8 xmit_type;
+ u16 idx;
+ u16 hlen;
+ bool data_split = false;
+
+ /* Get tx-queue context and netdev index */
+ txq_index = skb_get_queue_mapping(skb);
+ WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
+ txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
+ netdev_txq = netdev_get_tx_queue(ndev, txq_index);
+
+ WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
+
+ xmit_type = qede_xmit_type(skb, &ipv6_ext);
+
+#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
+ if (qede_pkt_req_lin(skb, xmit_type)) {
+ if (skb_linearize(skb)) {
+ txq->tx_mem_alloc_err++;
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+#endif
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod;
+ txq->sw_tx_ring.skbs[idx].skb = skb;
+ first_bd = (struct eth_tx_1st_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ first_bd->data.bd_flags.bitfields =
+ 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ qede_ptp_tx_ts(edev, skb);
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(txq->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(txq->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+ nbd++;
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* In case there is IPv6 with extension headers or LSO we need 2nd and
+ * 3rd BDs.
+ */
+ if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
+ second_bd = (struct eth_tx_2nd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(second_bd, 0, sizeof(*second_bd));
+
+ nbd++;
+ third_bd = (struct eth_tx_3rd_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+ memset(third_bd, 0, sizeof(*third_bd));
+
+ nbd++;
+ /* We need to fill in additional data in second_bd... */
+ tx_data_bd = (struct eth_tx_bd *)second_bd;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
+ }
+
+ /* Fill the parsing flags & params according to the requested offload */
+ if (xmit_type & XMIT_L4_CSUM) {
+ /* We don't re-calculate IP checksum as it is already done by
+ * the upper stack
+ */
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
+
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+
+ val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
+ }
+
+ /* Legacy FW had flipped behavior with regard to this bit -
+ * i.e., it needed to be set to prevent FW from touching
+ * encapsulated packets when it didn't need to.
+ */
+ if (unlikely(txq->is_legacy))
+ val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
+
+ /* If the packet is IPv6 with an extension header, indicate that
+ * to FW and pass a few params, since the device cracker doesn't
+ * support parsing IPv6 with extension headers.
+ */
+ if (unlikely(ipv6_ext))
+ qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
+ }
+
+ if (xmit_type & XMIT_LSO) {
+ first_bd->data.bd_flags.bitfields |=
+ (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
+ third_bd->data.lso_mss =
+ cpu_to_le16(skb_shinfo(skb)->gso_size);
+
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+
+ if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
+ u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
+
+ first_bd->data.bd_flags.bitfields |= 1 << tmp;
+ }
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
+
+ /* @@@TBD - if will not be removed need to check */
+ third_bd->data.bitfields |=
+ cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
+
+ /* Make life easier for FW guys who can't deal with header and
+ * data on same BD. If we need to split, use the second bd...
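+ * For example (illustrative sizes): if the linear area holds 200 bytes
+ * but the headers take only 66 bytes, the first BD is trimmed to the
+ * 66-byte header and the remaining 134 bytes are described by the next BD.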
+ */
+ if (unlikely(skb_headlen(skb) > hlen)) {
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "TSO split header size is %d (%x:%x)\n",
+ first_bd->nbytes, first_bd->addr.hi,
+ first_bd->addr.lo);
+
+ mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
+ le32_to_cpu(first_bd->addr.lo)) +
+ hlen;
+
+ BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
+ le16_to_cpu(first_bd->nbytes) -
+ hlen);
+
+ /* this marks the BD as one that has no
+ * individual mapping
+ */
+ txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
+
+ first_bd->nbytes = cpu_to_le16(hlen);
+
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ data_split = true;
+ }
+ } else {
+ if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
+ DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
+ qede_free_failed_tx_pkt(txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
+ val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+ }
+
+ first_bd->data.bitfields = cpu_to_le16(val);
+
+ /* Handle fragmented skb */
+ /* Special handling for frags inside the 2nd and 3rd BDs */
+ while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
+ rc = map_frag_to_bd(txq,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+
+ if (tx_data_bd == (struct eth_tx_bd *)second_bd)
+ tx_data_bd = (struct eth_tx_bd *)third_bd;
+ else
+ tx_data_bd = NULL;
+
+ frag_idx++;
+ }
+
+ /* map last frags into 4th, 5th .... */
+ for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
+ tx_data_bd = (struct eth_tx_bd *)
+ qed_chain_produce(&txq->tx_pbl);
+
+ memset(tx_data_bd, 0, sizeof(*tx_data_bd));
+
+ rc = map_frag_to_bd(txq,
+ &skb_shinfo(skb)->frags[frag_idx],
+ tx_data_bd);
+ if (rc) {
+ qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
+ qede_update_tx_producer(txq);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = nbd;
+
+ netdev_tx_sent_queue(netdev_txq, skb->len);
+
+ skb_tx_timestamp(skb);
+
+ /* Advance the packet producer only now, just before sending the packet,
+ * since mapping of pages may fail.
+ */
+ txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
+
+ /* 'next page' entries are counted in the producer value */
+ txq->tx_db.data.bd_prod =
+ cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
+ qede_update_tx_producer(txq);
+
+ if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
+ < (MAX_SKB_FRAGS + 1))) {
+ if (netdev_xmit_more())
+ qede_update_tx_producer(txq);
+
+ netif_tx_stop_queue(netdev_txq);
+ txq->stopped_cnt++;
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Stop queue was called\n");
+ /* paired memory barrier is in qede_tx_int(), we have to keep
+ * ordering of set_bit() in netif_tx_stop_queue() and read of
+ * fp->bd_tx_cons
+ */
+ smp_mb();
+
+ if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
+ (MAX_SKB_FRAGS + 1)) &&
+ (edev->state == QEDE_STATE_OPEN)) {
+ netif_tx_wake_queue(netdev_txq);
+ DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
+ "Wake queue was called\n");
+ }
+ }
+
+ return NETDEV_TX_OK;
+}
+
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int total_txq;
+
+ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
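+ /* For example (illustrative values): with 8 TSS queues and 3 traffic
+ * classes, total_txq is 24 and the queue picked by the stack is folded
+ * into the 0..23 range by the modulo below.
+ */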
+ return QEDE_TSS_COUNT(edev) ?
+ netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
+}
+
+/* 8B udp header + 8B base tunnel header + 32B option length */
+#define QEDE_MAX_TUN_HDR_LEN 48
+
+netdev_features_t qede_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb->encapsulation) {
+ u8 l4_proto = 0;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ /* Disable offloads for GENEVE tunnels, as HW can't parse
+ * a GENEVE header whose option length is greater than 32 bytes;
+ * also disable offloads for UDP ports which are not offloaded.
+ */
+ if (l4_proto == IPPROTO_UDP) {
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 hdrlen, vxln_port, gnv_port;
+
+ hdrlen = QEDE_MAX_TUN_HDR_LEN;
+ vxln_port = edev->vxlan_dst_port;
+ gnv_port = edev->geneve_dst_port;
+
+ if ((skb_inner_mac_header(skb) -
+ skb_transport_header(skb)) > hdrlen ||
+ (ntohs(udp_hdr(skb)->dest) != vxln_port &&
+ ntohs(udp_hdr(skb)->dest) != gnv_port))
+ return features & ~(NETIF_F_CSUM_MASK |
+ NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device, or at least unsupported
+ * natively; offloads for them can't be done trivially, so disable
+ * them for such skbs.
+ */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+ }
+
+ return features;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
new file mode 100644
index 000000000..dc43e7414
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -0,0 +1,2950 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/crash_dump.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+#include <asm/param.h>
+#include <linux/io.h>
+#include <linux/netdev_features.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/udp_tunnel.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/pkt_sched.h>
+#include <linux/ethtool.h>
+#include <linux/in.h>
+#include <linux/random.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/aer.h>
+#include "qede.h"
+#include "qede_ptp.h"
+
+MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
+MODULE_LICENSE("GPL");
+
+static uint debug;
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+static const struct qed_eth_ops *qed_ops;
+
+#define CHIP_NUM_57980S_40 0x1634
+#define CHIP_NUM_57980S_10 0x1666
+#define CHIP_NUM_57980S_MF 0x1636
+#define CHIP_NUM_57980S_100 0x1644
+#define CHIP_NUM_57980S_50 0x1654
+#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
+#define CHIP_NUM_AH 0x8070
+#define CHIP_NUM_AH_IOV 0x8090
+
+#ifndef PCI_DEVICE_ID_NX2_57980E
+#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
+#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
+#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
+#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
+#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
+#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
+#define PCI_DEVICE_ID_AH CHIP_NUM_AH
+#define PCI_DEVICE_ID_AH_IOV CHIP_NUM_AH_IOV
+
+#endif
+
+enum qede_pci_private {
+ QEDE_PRIVATE_PF,
+ QEDE_PRIVATE_VF
+};
+
+static const struct pci_device_id qede_pci_tbl[] = {
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
+#endif
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
+
+#define TX_TIMEOUT (5 * HZ)
+
+/* Utilize last protocol index for XDP */
+#define XDP_PI 11
+
+static void qede_remove(struct pci_dev *pdev);
+static void qede_shutdown(struct pci_dev *pdev);
+static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_schedule_recovery_handler(void *dev);
+static void qede_recovery_handler(struct qede_dev *edev);
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
+static void qede_get_eth_tlv_data(void *edev, void *data);
+static void qede_get_generic_tlv_data(void *edev,
+ struct qed_generic_tlvs *data);
+static void qede_generic_hw_err_handler(struct qede_dev *edev);
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
+ __be16 vlan_proto)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (vlan > 4095) {
+ DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+ vlan, vf);
+
+ return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
+
+ if (!is_valid_ether_addr(mac)) {
+ DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+ return -EINVAL;
+ }
+
+ return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+ struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ struct qed_update_vport_params *vport_params;
+ int rc;
+
+ vport_params = vzalloc(sizeof(*vport_params));
+ if (!vport_params)
+ return -ENOMEM;
+ DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+ rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+ /* Enable/Disable Tx switching for PF */
+ if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+ !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
+ vport_params->vport_id = 0;
+ vport_params->update_tx_switching_flg = 1;
+ vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, vport_params);
+ }
+
+ vfree(vport_params);
+ return rc;
+}
+#endif
+
+static int __maybe_unused qede_suspend(struct device *dev)
+{
+ dev_info(dev, "Device does not support suspend operation\n");
+
+ return -EOPNOTSUPP;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
+
+static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+};
+
+static struct pci_driver qede_pci_driver = {
+ .name = "qede",
+ .id_table = qede_pci_tbl,
+ .probe = qede_probe,
+ .remove = qede_remove,
+ .shutdown = qede_shutdown,
+#ifdef CONFIG_QED_SRIOV
+ .sriov_configure = qede_sriov_configure,
+#endif
+ .err_handler = &qede_err_handler,
+ .driver.pm = &qede_pm_ops,
+};
+
+static struct qed_eth_cb_ops qede_ll_ops = {
+ {
+#ifdef CONFIG_RFS_ACCEL
+ .arfs_filter_op = qede_arfs_filter_op,
+#endif
+ .link_update = qede_link_update,
+ .schedule_recovery_handler = qede_schedule_recovery_handler,
+ .schedule_hw_err_handler = qede_schedule_hw_err_handler,
+ .get_generic_tlv_data = qede_get_generic_tlv_data,
+ .get_protocol_tlv_data = qede_get_eth_tlv_data,
+ },
+ .force_mac = qede_force_mac,
+ .ports_update = qede_udp_ports_update,
+};
+
+static int qede_netdev_event(struct notifier_block *this, unsigned long event,
+ void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_drvinfo drvinfo;
+ struct qede_dev *edev;
+
+ if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
+ goto done;
+
+ /* Check whether this is a qede device */
+ if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
+ goto done;
+
+ memset(&drvinfo, 0, sizeof(drvinfo));
+ ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
+ if (strcmp(drvinfo.driver, "qede"))
+ goto done;
+ edev = netdev_priv(ndev);
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ /* Notify qed of the name change */
+ if (!edev->ops || !edev->ops->common)
+ goto done;
+ edev->ops->common->set_name(edev->cdev, edev->ndev->name);
+ break;
+ case NETDEV_CHANGEADDR:
+ edev = netdev_priv(ndev);
+ qede_rdma_event_changeaddr(edev);
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qede_netdev_notifier = {
+ .notifier_call = qede_netdev_event,
+};
+
+static
+int __init qede_init(void)
+{
+ int ret;
+
+ pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
+
+ qede_forced_speed_maps_init();
+
+ qed_ops = qed_get_eth_ops();
+ if (!qed_ops) {
+ pr_notice("Failed to get qed ethtool operations\n");
+ return -EINVAL;
+ }
+
+ /* Must register notifier before pci ops, since we might miss
+ * interface rename after pci probe and netdev registration.
+ */
+ ret = register_netdevice_notifier(&qede_netdev_notifier);
+ if (ret) {
+ pr_notice("Failed to register netdevice_notifier\n");
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ ret = pci_register_driver(&qede_pci_driver);
+ if (ret) {
+ pr_notice("Failed to register driver\n");
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ qed_put_eth_ops();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void __exit qede_cleanup(void)
+{
+ if (debug & QED_LOG_INFO_MASK)
+ pr_info("qede_cleanup called\n");
+
+ unregister_netdevice_notifier(&qede_netdev_notifier);
+ pci_unregister_driver(&qede_pci_driver);
+ qed_put_eth_ops();
+}
+
+module_init(qede_init);
+module_exit(qede_cleanup);
+
+static int qede_open(struct net_device *ndev);
+static int qede_close(struct net_device *ndev);
+
+void qede_fill_by_demand_stats(struct qede_dev *edev)
+{
+ struct qede_stats_common *p_common = &edev->stats.common;
+ struct qed_eth_stats stats;
+
+ edev->ops->get_vport_stats(edev->cdev, &stats);
+
+ spin_lock(&edev->stats_lock);
+
+ p_common->no_buff_discards = stats.common.no_buff_discards;
+ p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
+ p_common->ttl0_discard = stats.common.ttl0_discard;
+ p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
+ p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
+ p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
+ p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
+ p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
+ p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
+ p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
+ p_common->mac_filter_discards = stats.common.mac_filter_discards;
+ p_common->gft_filter_drop = stats.common.gft_filter_drop;
+
+ p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
+ p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
+ p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
+ p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
+ p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
+ p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
+ p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
+ p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
+ p_common->coalesced_events = stats.common.tpa_coalesced_events;
+ p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
+ p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
+ p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
+
+ p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
+ p_common->rx_65_to_127_byte_packets =
+ stats.common.rx_65_to_127_byte_packets;
+ p_common->rx_128_to_255_byte_packets =
+ stats.common.rx_128_to_255_byte_packets;
+ p_common->rx_256_to_511_byte_packets =
+ stats.common.rx_256_to_511_byte_packets;
+ p_common->rx_512_to_1023_byte_packets =
+ stats.common.rx_512_to_1023_byte_packets;
+ p_common->rx_1024_to_1518_byte_packets =
+ stats.common.rx_1024_to_1518_byte_packets;
+ p_common->rx_crc_errors = stats.common.rx_crc_errors;
+ p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
+ p_common->rx_pause_frames = stats.common.rx_pause_frames;
+ p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
+ p_common->rx_align_errors = stats.common.rx_align_errors;
+ p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
+ p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
+ p_common->rx_jabbers = stats.common.rx_jabbers;
+ p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
+ p_common->rx_fragments = stats.common.rx_fragments;
+ p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
+ p_common->tx_65_to_127_byte_packets =
+ stats.common.tx_65_to_127_byte_packets;
+ p_common->tx_128_to_255_byte_packets =
+ stats.common.tx_128_to_255_byte_packets;
+ p_common->tx_256_to_511_byte_packets =
+ stats.common.tx_256_to_511_byte_packets;
+ p_common->tx_512_to_1023_byte_packets =
+ stats.common.tx_512_to_1023_byte_packets;
+ p_common->tx_1024_to_1518_byte_packets =
+ stats.common.tx_1024_to_1518_byte_packets;
+ p_common->tx_pause_frames = stats.common.tx_pause_frames;
+ p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
+ p_common->brb_truncates = stats.common.brb_truncates;
+ p_common->brb_discards = stats.common.brb_discards;
+ p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
+ p_common->link_change_count = stats.common.link_change_count;
+ p_common->ptp_skip_txts = edev->ptp_skip_txts;
+
+ if (QEDE_IS_BB(edev)) {
+ struct qede_stats_bb *p_bb = &edev->stats.bb;
+
+ p_bb->rx_1519_to_1522_byte_packets =
+ stats.bb.rx_1519_to_1522_byte_packets;
+ p_bb->rx_1519_to_2047_byte_packets =
+ stats.bb.rx_1519_to_2047_byte_packets;
+ p_bb->rx_2048_to_4095_byte_packets =
+ stats.bb.rx_2048_to_4095_byte_packets;
+ p_bb->rx_4096_to_9216_byte_packets =
+ stats.bb.rx_4096_to_9216_byte_packets;
+ p_bb->rx_9217_to_16383_byte_packets =
+ stats.bb.rx_9217_to_16383_byte_packets;
+ p_bb->tx_1519_to_2047_byte_packets =
+ stats.bb.tx_1519_to_2047_byte_packets;
+ p_bb->tx_2048_to_4095_byte_packets =
+ stats.bb.tx_2048_to_4095_byte_packets;
+ p_bb->tx_4096_to_9216_byte_packets =
+ stats.bb.tx_4096_to_9216_byte_packets;
+ p_bb->tx_9217_to_16383_byte_packets =
+ stats.bb.tx_9217_to_16383_byte_packets;
+ p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
+ p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
+ } else {
+ struct qede_stats_ah *p_ah = &edev->stats.ah;
+
+ p_ah->rx_1519_to_max_byte_packets =
+ stats.ah.rx_1519_to_max_byte_packets;
+ p_ah->tx_1519_to_max_byte_packets =
+ stats.ah.tx_1519_to_max_byte_packets;
+ }
+
+ spin_unlock(&edev->stats_lock);
+}
+
+static void qede_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct qede_stats_common *p_common;
+
+ p_common = &edev->stats.common;
+
+ spin_lock(&edev->stats_lock);
+
+ stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+ p_common->rx_bcast_pkts;
+ stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+ p_common->tx_bcast_pkts;
+
+ stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+ p_common->rx_bcast_bytes;
+ stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+ p_common->tx_bcast_bytes;
+
+ stats->tx_errors = p_common->tx_err_drop_pkts;
+ stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
+
+ stats->rx_fifo_errors = p_common->no_buff_discards;
+
+ if (QEDE_IS_BB(edev))
+ stats->collisions = edev->stats.bb.tx_total_collisions;
+ stats->rx_crc_errors = p_common->rx_crc_errors;
+ stats->rx_frame_errors = p_common->rx_align_errors;
+
+ spin_unlock(&edev->stats_lock);
+}
+
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+ max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+ int link_state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+
+static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
+}
+#endif
+
+static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EAGAIN;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return qede_ptp_hw_ts(edev, ifr);
+ default:
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "default IOCTL cmd 0x%x\n", cmd);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
+{
+ char *p_sb = (char *)fp->sb_info->sb_virt;
+ u32 sb_size, i;
+
+ sb_size = sizeof(struct status_block);
+
+ for (i = 0; i < sb_size; i += 8)
+ DP_NOTICE(edev,
+ "%02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX %02hhX\n",
+ p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
+ p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
+}
+
+static void
+qede_txq_fp_log_metadata(struct qede_dev *edev,
+ struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_chain *p_chain = &txq->tx_pbl;
+
+ /* Dump txq/fp/sb ids and other metadata */
+ DP_NOTICE(edev,
+ "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
+ fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
+ p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
+
+ /* Dump all the relevant prod/cons indexes */
+ DP_NOTICE(edev,
+ "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
+ le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
+ qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
+}
+
+static void
+qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
+{
+ struct qed_sb_info_dbg sb_dbg;
+ int rc;
+
+ /* sb info */
+ qede_fp_sb_dump(edev, fp);
+
+ memset(&sb_dbg, 0, sizeof(sb_dbg));
+ rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
+
+ DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
+ sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
+
+ /* report to mfw */
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
+ txq->index, le16_to_cpu(*txq->hw_cons_ptr),
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
+ if (!rc)
+ edev->ops->common->mfw_report(edev->cdev,
+ "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
+ txq->index, fp->sb_info->igu_sb_id,
+ sb_dbg.igu_prod, sb_dbg.igu_cons,
+ sb_dbg.pi[TX_PI(txq->cos)]);
+}
+
+static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ netif_carrier_off(dev);
+ DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
+
+ for_each_queue(i) {
+ struct qede_tx_queue *txq;
+ struct qede_fastpath *fp;
+ int cos;
+
+ fp = &edev->fp_array[i];
+ if (!(fp->type & QEDE_FASTPATH_TX))
+ continue;
+
+ for_each_cos_in_txq(edev, cos) {
+ txq = &fp->txq[cos];
+
+ /* Dump basic metadata for all queues */
+ qede_txq_fp_log_metadata(edev, fp, txq);
+
+ if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl))
+ qede_tx_log_print(edev, fp, txq);
+ }
+ }
+
+ if (IS_VF(edev))
+ return;
+
+ if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) {
+ DP_INFO(edev,
+ "Avoid handling a Tx timeout while another HW error is being handled\n");
+ return;
+ }
+
+ set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ int cos, count, offset;
+
+ if (num_tc > edev->dev_info.num_tc)
+ return -EINVAL;
+
+ netdev_reset_tc(ndev);
+ netdev_set_num_tc(ndev, num_tc);
+
+ for_each_cos_in_txq(edev, cos) {
+ count = QEDE_TSS_COUNT(edev);
+ offset = cos * QEDE_TSS_COUNT(edev);
+ netdev_set_tc_queue(ndev, cos, count, offset);
+ }
+
+ return 0;
+}
+
+static int
+qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
+ __be16 proto)
+{
+ switch (f->command) {
+ case FLOW_CLS_REPLACE:
+ return qede_add_tc_flower_fltr(edev, proto, f);
+ case FLOW_CLS_DESTROY:
+ return qede_delete_flow_filter(edev, f->cookie);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *f;
+ struct qede_dev *edev = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ f = type_data;
+ return qede_set_flower(edev, f, f->common.protocol);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(qede_block_cb_list);
+
+static int
+qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ struct tc_mqprio_qopt *mqprio;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &qede_block_cb_list,
+ qede_setup_tc_block_cb,
+ edev, edev, true);
+ case TC_SETUP_QDISC_MQPRIO:
+ mqprio = type_data;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ return qede_setup_tc(dev, mqprio->num_tc);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops qede_netdev_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_eth_ioctl = qede_ioctl,
+ .ndo_tx_timeout = qede_tx_timeout,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+ .ndo_set_vf_trust = qede_set_vf_trust,
+#endif
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = qede_rx_flow_steer,
+#endif
+ .ndo_xdp_xmit = qede_xdp_transmit,
+ .ndo_setup_tc = qede_setup_tc_offload,
+};
+
+static const struct net_device_ops qede_netdev_vf_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_features_check = qede_features_check,
+};
+
+static const struct net_device_ops qede_netdev_vf_xdp_ops = {
+ .ndo_open = qede_open,
+ .ndo_stop = qede_close,
+ .ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
+ .ndo_set_rx_mode = qede_set_rx_mode,
+ .ndo_set_mac_address = qede_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = qede_change_mtu,
+ .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_fix_features = qede_fix_features,
+ .ndo_set_features = qede_set_features,
+ .ndo_get_stats64 = qede_get_stats64,
+ .ndo_features_check = qede_features_check,
+ .ndo_bpf = qede_xdp,
+ .ndo_xdp_xmit = qede_xdp_transmit,
+};
+
+/* -------------------------------------------------------------------------
+ * START OF PROBE / REMOVE
+ * -------------------------------------------------------------------------
+ */
+
+static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
+ struct pci_dev *pdev,
+ struct qed_dev_eth_info *info,
+ u32 dp_module, u8 dp_level)
+{
+ struct net_device *ndev;
+ struct qede_dev *edev;
+
+ ndev = alloc_etherdev_mqs(sizeof(*edev),
+ info->num_queues * info->num_tc,
+ info->num_queues);
+ if (!ndev) {
+ pr_err("etherdev allocation failed\n");
+ return NULL;
+ }
+
+ edev = netdev_priv(ndev);
+ edev->ndev = ndev;
+ edev->cdev = cdev;
+ edev->pdev = pdev;
+ edev->dp_module = dp_module;
+ edev->dp_level = dp_level;
+ edev->ops = qed_ops;
+
+ if (is_kdump_kernel()) {
+ edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
+ edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
+ } else {
+ edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
+ edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
+ }
+
+ DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
+ info->num_queues, info->num_queues);
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, info, sizeof(*info));
+
+ /* As ethtool doesn't have the ability to show WoL behavior as
+ * 'default', declare it enabled if the device supports it.
+ */
+ if (edev->dev_info.common.wol_support)
+ edev->wol_enabled = true;
+
+ INIT_LIST_HEAD(&edev->vlan_list);
+
+ return edev;
+}
+
+static void qede_init_ndev(struct qede_dev *edev)
+{
+ struct net_device *ndev = edev->ndev;
+ struct pci_dev *pdev = edev->pdev;
+ bool udp_tunnel_enable = false;
+ netdev_features_t hw_features;
+
+ pci_set_drvdata(pdev, ndev);
+
+ ndev->mem_start = edev->dev_info.common.pci_mem_start;
+ ndev->base_addr = ndev->mem_start;
+ ndev->mem_end = edev->dev_info.common.pci_mem_end;
+ ndev->irq = edev->dev_info.common.pci_irq;
+
+ ndev->watchdog_timeo = TX_TIMEOUT;
+
+ if (IS_VF(edev)) {
+ if (edev->dev_info.xdp_supported)
+ ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
+ else
+ ndev->netdev_ops = &qede_netdev_vf_ops;
+ } else {
+ ndev->netdev_ops = &qede_netdev_ops;
+ }
+
+ qede_set_ethtool_ops(ndev);
+
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* user-changeable features */
+ hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
+
+ if (edev->dev_info.common.b_arfs_capable)
+ hw_features |= NETIF_F_NTUPLE;
+
+ if (edev->dev_info.common.vxlan_enable ||
+ edev->dev_info.common.geneve_enable)
+ udp_tunnel_enable = true;
+
+ if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
+ hw_features |= NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO |
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
+ }
+
+ if (udp_tunnel_enable) {
+ hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM);
+
+ qede_set_udp_tunnels(edev);
+ }
+
+ if (edev->dev_info.common.gre_enable) {
+ hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
+ ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM);
+ }
+
+ ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HIGHDMA;
+ ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
+
+ ndev->hw_features = hw_features;
+
+ /* MTU range: 46 - 9600 */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
+
+ /* Set network device HW mac */
+ eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
+
+ ndev->mtu = edev->dev_info.common.mtu;
+}
+
+/* This function converts from 32b param to two params of level and module
+ * Input 32b decoding:
+ * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
+ * 'happy' flow, e.g. memory allocation failed.
+ * b30 - enable all INFO prints. INFO prints are for major steps in the flow
+ * and provide important parameters.
+ * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
+ * module. VERBOSE prints are for tracking the specific flow in low level.
+ *
+ * Notice that the level should be that of the lowest required logs.
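+ *
+ * For example (illustrative values): debug=0x80000000 keeps the default
+ * NOTICE level, debug=0x40000000 selects the INFO level, and any bit set
+ * in b29-b0 selects the VERBOSE level with the module bitmap taken from
+ * (debug & 0x3FFFFFFF), as done in qede_config_debug() below.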
+ */
+void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
+{
+ *p_dp_level = QED_LEVEL_NOTICE;
+ *p_dp_module = 0;
+
+ if (debug & QED_LOG_VERBOSE_MASK) {
+ *p_dp_level = QED_LEVEL_VERBOSE;
+ *p_dp_module = (debug & 0x3FFFFFFF);
+ } else if (debug & QED_LOG_INFO_MASK) {
+ *p_dp_level = QED_LEVEL_INFO;
+ } else if (debug & QED_LOG_NOTICE_MASK) {
+ *p_dp_level = QED_LEVEL_NOTICE;
+ }
+}
+
+static void qede_free_fp_array(struct qede_dev *edev)
+{
+ if (edev->fp_array) {
+ struct qede_fastpath *fp;
+ int i;
+
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+
+ kfree(fp->sb_info);
+ /* Handle the mem alloc failure case where qede_init_fp
+ * didn't register xdp_rxq_info yet.
+ * This implicitly applies only when (fp->type & QEDE_FASTPATH_RX).
+ */
+ if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
+ kfree(fp->rxq);
+ kfree(fp->xdp_tx);
+ kfree(fp->txq);
+ }
+ kfree(edev->fp_array);
+ }
+
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
+}
+
+static int qede_alloc_fp_array(struct qede_dev *edev)
+{
+ u8 fp_combined, fp_rx = edev->fp_num_rx;
+ struct qede_fastpath *fp;
+ int i;
+
+ edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
+ sizeof(*edev->fp_array), GFP_KERNEL);
+ if (!edev->fp_array) {
+ DP_NOTICE(edev, "fp array allocation failed\n");
+ goto err;
+ }
+
+ if (!edev->coal_entry) {
+ edev->coal_entry = kcalloc(QEDE_MAX_RSS_CNT(edev),
+ sizeof(*edev->coal_entry),
+ GFP_KERNEL);
+ if (!edev->coal_entry) {
+ DP_ERR(edev, "coalesce entry allocation failed\n");
+ goto err;
+ }
+ }
+
+ fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
+
+ /* Allocate the FP elements for Rx queues followed by combined and then
+ * the Tx. This ordering should be maintained so that the respective
+ * queues (Rx or Tx) will be together in the fastpath array and the
+ * associated ids will be sequential.
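+ *
+ * For example (illustrative counts): with fp_num_rx == 2, fp_num_tx == 2
+ * and two combined queues, indices 0-1 become QEDE_FASTPATH_RX, 2-3
+ * QEDE_FASTPATH_COMBINED and 4-5 QEDE_FASTPATH_TX.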
+ */
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+
+ fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
+ if (!fp->sb_info) {
+ DP_NOTICE(edev, "sb info struct allocation failed\n");
+ goto err;
+ }
+
+ if (fp_rx) {
+ fp->type = QEDE_FASTPATH_RX;
+ fp_rx--;
+ } else if (fp_combined) {
+ fp->type = QEDE_FASTPATH_COMBINED;
+ fp_combined--;
+ } else {
+ fp->type = QEDE_FASTPATH_TX;
+ }
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ fp->txq = kcalloc(edev->dev_info.num_tc,
+ sizeof(*fp->txq), GFP_KERNEL);
+ if (!fp->txq)
+ goto err;
+ }
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
+ if (!fp->rxq)
+ goto err;
+
+ if (edev->xdp_prog) {
+ fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
+ GFP_KERNEL);
+ if (!fp->xdp_tx)
+ goto err;
+ fp->type |= QEDE_FASTPATH_XDP;
+ }
+ }
+ }
+
+ return 0;
+err:
+ qede_free_fp_array(edev);
+ return -ENOMEM;
+}
+
+/* The qede lock is used to protect driver state change and driver flows that
+ * are not reentrant.
+ */
+void __qede_lock(struct qede_dev *edev)
+{
+ mutex_lock(&edev->qede_lock);
+}
+
+void __qede_unlock(struct qede_dev *edev)
+{
+ mutex_unlock(&edev->qede_lock);
+}
+
+/* This version of the lock should be used when acquiring the RTNL lock is also
+ * needed in addition to the internal qede lock.
+ */
+static void qede_lock(struct qede_dev *edev)
+{
+ rtnl_lock();
+ __qede_lock(edev);
+}
+
+static void qede_unlock(struct qede_dev *edev)
+{
+ __qede_unlock(edev);
+ rtnl_unlock();
+}
+
+static void qede_periodic_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ periodic_task.work);
+
+ qede_fill_by_demand_stats(edev);
+ schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+ INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+ spin_lock_init(&edev->stats_lock);
+ edev->stats_coal_usecs = USEC_PER_SEC;
+ edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
+static void qede_sp_task(struct work_struct *work)
+{
+ struct qede_dev *edev = container_of(work, struct qede_dev,
+ sp_task.work);
+
+ /* Disable execution of this deferred work once
+ * qede removal is in progress; this stops any future
+ * scheduling of sp_task.
+ */
+ if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
+ return;
+
+ /* The locking scheme depends on the specific flag:
+ * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
+ * ensure that ongoing flows are ended and new ones are not started.
+ * In other cases - only the internal qede lock should be acquired.
+ */
+
+ if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+ cancel_delayed_work_sync(&edev->periodic_task);
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ qede_lock(edev);
+ qede_recovery_handler(edev);
+ qede_unlock(edev);
+ }
+
+ __qede_lock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
+ if (edev->state == QEDE_STATE_OPEN)
+ qede_config_rx_mode(edev->ndev);
+
+#ifdef CONFIG_RFS_ACCEL
+ if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
+ if (edev->state == QEDE_STATE_OPEN)
+ qede_process_arfs_filters(edev, false);
+ }
+#endif
+ if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
+ qede_generic_hw_err_handler(edev);
+ __qede_unlock(edev);
+
+ if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
+#ifdef CONFIG_QED_SRIOV
+ /* SRIOV must be disabled outside the lock to avoid a deadlock.
+ * The recovery of the active VFs is currently not supported.
+ */
+ if (pci_num_vf(edev->pdev))
+ qede_sriov_configure(edev->pdev, 0);
+#endif
+ edev->ops->common->recovery_process(edev->cdev);
+ }
+}
+
+static void qede_update_pf_params(struct qed_dev *cdev)
+{
+ struct qed_pf_params pf_params;
+ u16 num_cons;
+
+ /* 64 rx + 64 tx + 64 XDP */
+ memset(&pf_params, 0, sizeof(struct qed_pf_params));
+
+ /* 1 rx + 1 xdp + max tx cos */
+ num_cons = QED_MIN_L2_CONS;
+
+ pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
+
+ /* Same for VFs - make sure they'll have sufficient connections
+ * to support XDP Tx queues.
+ */
+ pf_params.eth_pf_params.num_vf_cons = 48;
+
+ pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
+ qed_ops->common->update_pf_params(cdev, &pf_params);
+}
+
+#define QEDE_FW_VER_STR_SIZE 80
+
+static void qede_log_probe(struct qede_dev *edev)
+{
+ struct qed_dev_info *p_dev_info = &edev->dev_info.common;
+ u8 buf[QEDE_FW_VER_STR_SIZE];
+ size_t left_size;
+
+ snprintf(buf, QEDE_FW_VER_STR_SIZE,
+ "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
+ p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
+ p_dev_info->fw_eng,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
+ QED_MFW_VERSION_3_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
+ QED_MFW_VERSION_2_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
+ QED_MFW_VERSION_1_OFFSET,
+ (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
+ QED_MFW_VERSION_0_OFFSET);
+
+ left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
+ if (p_dev_info->mbi_version && left_size)
+ snprintf(buf + strlen(buf), left_size,
+ " [MBI %d.%d.%d]",
+ (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
+ QED_MBI_VERSION_2_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
+ QED_MBI_VERSION_1_OFFSET,
+ (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
+ QED_MBI_VERSION_0_OFFSET);
+
+ pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
+ PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
+ buf, edev->ndev->name);
+}
+
+enum qede_probe_mode {
+ QEDE_PROBE_NORMAL,
+ QEDE_PROBE_RECOVERY,
+};
+
+static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
+ bool is_vf, enum qede_probe_mode mode)
+{
+ struct qed_probe_params probe_params;
+ struct qed_slowpath_params sp_params;
+ struct qed_dev_eth_info dev_info;
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+ int rc;
+
+ if (unlikely(dp_level & QED_LEVEL_INFO))
+ pr_notice("Starting qede probe\n");
+
+ memset(&probe_params, 0, sizeof(probe_params));
+ probe_params.protocol = QED_PROTOCOL_ETH;
+ probe_params.dp_module = dp_module;
+ probe_params.dp_level = dp_level;
+ probe_params.is_vf = is_vf;
+ probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
+ cdev = qed_ops->common->probe(pdev, &probe_params);
+ if (!cdev) {
+ rc = -ENODEV;
+ goto err0;
+ }
+
+ qede_update_pf_params(cdev);
+
+ /* Start the Slowpath-process */
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.int_mode = QED_INT_MODE_MSIX;
+ strscpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
+ rc = qed_ops->common->slowpath_start(cdev, &sp_params);
+ if (rc) {
+ pr_notice("Cannot start slowpath\n");
+ goto err1;
+ }
+
+ /* Learn information crucial for qede to progress */
+ rc = qed_ops->fill_dev_info(cdev, &dev_info);
+ if (rc)
+ goto err2;
+
+ if (mode != QEDE_PROBE_RECOVERY) {
+ edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
+ dp_level);
+ if (!edev) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+
+ edev->devlink = qed_ops->common->devlink_register(cdev);
+ if (IS_ERR(edev->devlink)) {
+ DP_NOTICE(edev, "Cannot register devlink\n");
+ rc = PTR_ERR(edev->devlink);
+ edev->devlink = NULL;
+ goto err3;
+ }
+ } else {
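+ /* Recovery probe: reuse the existing netdev and devlink instance and
+ * only attach the freshly probed qed device beneath them.
+ */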
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qed_devlink *qdl;
+
+ edev = netdev_priv(ndev);
+ qdl = devlink_priv(edev->devlink);
+ qdl->cdev = cdev;
+ edev->cdev = cdev;
+ memset(&edev->stats, 0, sizeof(edev->stats));
+ memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
+ }
+
+ if (is_vf)
+ set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
+
+ qede_init_ndev(edev);
+
+ rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
+ if (rc)
+ goto err3;
+
+ if (mode != QEDE_PROBE_RECOVERY) {
+ /* Prepare the lock prior to the registration of the netdev,
+ * as once it's registered we might reach flows requiring it
+ * [it's even possible to reach a flow needing it directly
+ * from there, although it's unlikely].
+ */
+ INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
+ mutex_init(&edev->qede_lock);
+ qede_init_periodic_task(edev);
+
+ rc = register_netdev(edev->ndev);
+ if (rc) {
+ DP_NOTICE(edev, "Cannot register net-device\n");
+ goto err4;
+ }
+ }
+
+ edev->ops->common->set_name(cdev, edev->ndev->name);
+
+ /* PTP not supported on VFs */
+ if (!is_vf)
+ qede_ptp_enable(edev);
+
+ edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+
+#ifdef CONFIG_DCB
+ if (!IS_VF(edev))
+ qede_set_dcbnl_ops(edev->ndev);
+#endif
+
+ edev->rx_copybreak = QEDE_RX_HDR_SIZE;
+
+ qede_log_probe(edev);
+
+ /* retain user config (for example - after recovery) */
+ if (edev->stats_coal_usecs)
+ schedule_delayed_work(&edev->periodic_task, 0);
+
+ return 0;
+
+err4:
+ qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
+err3:
+ if (mode != QEDE_PROBE_RECOVERY)
+ free_netdev(edev->ndev);
+ else
+ edev->cdev = NULL;
+err2:
+ qed_ops->common->slowpath_stop(cdev);
+err1:
+ qed_ops->common->remove(cdev);
+err0:
+ return rc;
+}
+
+static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ bool is_vf = false;
+ u32 dp_module = 0;
+ u8 dp_level = 0;
+
+ switch ((enum qede_pci_private)id->driver_data) {
+ case QEDE_PRIVATE_VF:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a VF\n");
+ is_vf = true;
+ break;
+ default:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a PF\n");
+ }
+
+ qede_config_debug(debug, &dp_module, &dp_level);
+
+ return __qede_probe(pdev, dp_module, dp_level, is_vf,
+ QEDE_PROBE_NORMAL);
+}
+
+enum qede_remove_mode {
+ QEDE_REMOVE_NORMAL,
+ QEDE_REMOVE_RECOVERY,
+};
+
+static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qede_dev *edev;
+ struct qed_dev *cdev;
+
+ if (!ndev) {
+ dev_info(&pdev->dev, "Device has already been removed\n");
+ return;
+ }
+
+ edev = netdev_priv(ndev);
+ cdev = edev->cdev;
+
+ DP_INFO(edev, "Starting qede_remove\n");
+
+ qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
+
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
+ unregister_netdev(ndev);
+
+ cancel_delayed_work_sync(&edev->sp_task);
+ cancel_delayed_work_sync(&edev->periodic_task);
+
+ edev->ops->common->set_power_state(cdev, PCI_D0);
+
+ pci_set_drvdata(pdev, NULL);
+ }
+
+ qede_ptp_disable(edev);
+
+ /* Use the global ops since edev is about to be freed */
+ qed_ops->common->slowpath_stop(cdev);
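+ /* When shutting down for power-off, skip the remaining teardown as the
+ * hardware is about to lose power anyway.
+ */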
+ if (system_state == SYSTEM_POWER_OFF)
+ return;
+
+ if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
+ qed_ops->common->devlink_unregister(edev->devlink);
+ edev->devlink = NULL;
+ }
+ qed_ops->common->remove(cdev);
+ edev->cdev = NULL;
+
+ /* Since this can happen out-of-sync with other flows,
+ * don't release the netdevice until after slowpath stop
+ * has been called to guarantee various other contexts
+ * [e.g., QED register callbacks] won't break anything when
+ * accessing the netdevice.
+ */
+ if (mode != QEDE_REMOVE_RECOVERY) {
+ kfree(edev->coal_entry);
+ free_netdev(ndev);
+ }
+
+ dev_info(&pdev->dev, "Ending qede_remove successfully\n");
+}
+
+static void qede_remove(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+static void qede_shutdown(struct pci_dev *pdev)
+{
+ __qede_remove(pdev, QEDE_REMOVE_NORMAL);
+}
+
+/* -------------------------------------------------------------------------
+ * START OF LOAD / UNLOAD
+ * -------------------------------------------------------------------------
+ */
+
+static int qede_set_num_queues(struct qede_dev *edev)
+{
+ int rc;
+ u16 rss_num;
+
+ /* Set up queues according to the available resources */
+ if (edev->req_queues)
+ rss_num = edev->req_queues;
+ else
+ rss_num = netif_get_num_default_rss_queues() *
+ edev->dev_info.common.num_hwfns;
+
+ rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
+
+ rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
+ if (rc > 0) {
+ /* Managed to request interrupts for our queues */
+ edev->num_queues = rc;
+ DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
+ QEDE_QUEUE_CNT(edev), rss_num);
+ rc = 0;
+ }
+
+ edev->fp_num_tx = edev->req_num_tx;
+ edev->fp_num_rx = edev->req_num_rx;
+
+ return rc;
+}
+
+static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
+ u16 sb_id)
+{
+ if (sb_info->sb_virt) {
+ edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
+ (void *)sb_info->sb_virt, sb_info->sb_phys);
+ memset(sb_info, 0, sizeof(*sb_info));
+ }
+}
+
+/* This function allocates fast-path status block memory */
+static int qede_alloc_mem_sb(struct qede_dev *edev,
+ struct qed_sb_info *sb_info, u16 sb_id)
+{
+ struct status_block *sb_virt;
+ dma_addr_t sb_phys;
+ int rc;
+
+ sb_virt = dma_alloc_coherent(&edev->pdev->dev,
+ sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
+ if (!sb_virt) {
+ DP_ERR(edev, "Status block allocation failed\n");
+ return -ENOMEM;
+ }
+
+ rc = edev->ops->common->sb_init(edev->cdev, sb_info,
+ sb_virt, sb_phys, sb_id,
+ QED_SB_TYPE_L2_QUEUE);
+ if (rc) {
+ DP_ERR(edev, "Status block initialization failed\n");
+ dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
+ sb_virt, sb_phys);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_free_rx_buffers(struct qede_dev *edev,
+ struct qede_rx_queue *rxq)
+{
+ u16 i;
+
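+ /* Release every page still held in the software ring between the
+ * consumer and producer indices.
+ */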
+ for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
+ struct sw_rx_data *rx_buf;
+ struct page *data;
+
+ rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
+ data = rx_buf->data;
+
+ dma_unmap_page(&edev->pdev->dev,
+ rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
+
+ rx_buf->data = NULL;
+ __free_page(data);
+ }
+}
+
+static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+ /* Free rx buffers */
+ qede_free_rx_buffers(edev, rxq);
+
+ /* Free the parallel SW ring */
+ kfree(rxq->sw_rx_ring);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
+ edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
+}
+
+static void qede_set_tpa_param(struct qede_rx_queue *rxq)
+{
+ int i;
+
+ for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
+ struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
+
+ tpa_info->state = QEDE_AGG_STATE_NONE;
+ }
+}
+
+/* This function allocates all memory needed per Rx queue */
+static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
+{
+ struct qed_chain_init_params params = {
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = RX_RING_SIZE,
+ };
+ struct qed_dev *cdev = edev->cdev;
+ int i, rc, size;
+
+ rxq->num_rx_buffers = edev->q_num_rx_buffers;
+
+ rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
+
+ rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
+ size = rxq->rx_headroom +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ /* Make sure that the headroom and payload fit in a single page */
+ if (rxq->rx_buf_size + size > PAGE_SIZE)
+ rxq->rx_buf_size = PAGE_SIZE - size;
+
+ /* Segment size to split a page into multiple equal parts,
+ * unless XDP is used in which case we'd use the entire page.
+ */
+ if (!edev->xdp_prog) {
+ size = size + rxq->rx_buf_size;
+ rxq->rx_buf_seg_size = roundup_pow_of_two(size);
+ } else {
+ rxq->rx_buf_seg_size = PAGE_SIZE;
+ edev->ndev->features &= ~NETIF_F_GRO_HW;
+ }
+
+ /* Allocate the parallel driver ring for Rx buffers */
+ size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
+ rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
+ if (!rxq->sw_rx_ring) {
+ DP_ERR(edev, "Rx buffers ring allocation failed\n");
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ /* Allocate FW Rx ring */
+ params.mode = QED_CHAIN_MODE_NEXT_PTR;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.elem_size = sizeof(struct eth_rx_bd);
+
+ rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
+ if (rc)
+ goto err;
+
+ /* Allocate FW completion ring */
+ params.mode = QED_CHAIN_MODE_PBL;
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME;
+ params.elem_size = sizeof(union eth_rx_cqe);
+
+ rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
+ if (rc)
+ goto err;
+
+ /* Allocate buffers for the Rx ring */
+ rxq->filled_buffers = 0;
+ for (i = 0; i < rxq->num_rx_buffers; i++) {
+ rc = qede_alloc_rx_buffer(rxq, false);
+ if (rc) {
+ DP_ERR(edev,
+ "Rx buffers allocation failed at index %d\n", i);
+ goto err;
+ }
+ }
+
+ edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
+ if (!edev->gro_disable)
+ qede_set_tpa_param(rxq);
+err:
+ return rc;
+}
+
+static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ /* Free the parallel SW ring */
+ if (txq->is_xdp)
+ kfree(txq->sw_tx_ring.xdp);
+ else
+ kfree(txq->sw_tx_ring.skbs);
+
+ /* Free the real RQ ring used by FW */
+ edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
+}
+
+/* This function allocates all memory needed per Tx queue */
+static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .num_elems = edev->q_num_tx_buffers,
+ .elem_size = sizeof(union eth_tx_bd_types),
+ };
+ int size, rc;
+
+ txq->num_tx_buffers = edev->q_num_tx_buffers;
+
+ /* Allocate the parallel driver ring for Tx buffers */
+ if (txq->is_xdp) {
+ size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
+ txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.xdp)
+ goto err;
+ } else {
+ size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
+ txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
+ if (!txq->sw_tx_ring.skbs)
+ goto err;
+ }
+
+ rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
+ if (rc)
+ goto err;
+
+ return 0;
+
+err:
+ qede_free_mem_txq(edev, txq);
+ return -ENOMEM;
+}
+
+/* This function frees all memory of a single fp */
+static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
+{
+ qede_free_mem_sb(edev, fp->sb_info, fp->id);
+
+ if (fp->type & QEDE_FASTPATH_RX)
+ qede_free_mem_rxq(edev, fp->rxq);
+
+ if (fp->type & QEDE_FASTPATH_XDP)
+ qede_free_mem_txq(edev, fp->xdp_tx);
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos)
+ qede_free_mem_txq(edev, &fp->txq[cos]);
+ }
+}
+
+/* This function allocates all memory needed for a single fp (i.e. an entity
+ * which contains a status block, one rx queue and/or multiple per-TC tx queues).
+ */
+static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
+{
+ int rc = 0;
+
+ rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
+ if (rc)
+ goto out;
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = qede_alloc_mem_rxq(edev, fp->rxq);
+ if (rc)
+ goto out;
+ }
+
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
+ if (rc)
+ goto out;
+ }
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
+ if (rc)
+ goto out;
+ }
+ }
+
+out:
+ return rc;
+}
+
+static void qede_free_mem_load(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ qede_free_mem_fp(edev, fp);
+ }
+}
+
+/* This function allocates all qede memory at NIC load. */
+static int qede_alloc_mem_load(struct qede_dev *edev)
+{
+ int rc = 0, queue_id;
+
+ for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
+ struct qede_fastpath *fp = &edev->fp_array[queue_id];
+
+ rc = qede_alloc_mem_fp(edev, fp);
+ if (rc) {
+ DP_ERR(edev,
+ "Failed to allocate memory for fastpath - rss id = %d\n",
+ queue_id);
+ qede_free_mem_load(edev);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static void qede_empty_tx_queue(struct qede_dev *edev,
+ struct qede_tx_queue *txq)
+{
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct netdev_queue *netdev_txq;
+ int rc, len = 0;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
+
+ while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
+ qed_chain_get_prod_idx(&txq->tx_pbl)) {
+ DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
+ "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+
+ rc = qede_free_tx_pkt(edev, txq, &len);
+ if (rc) {
+ DP_NOTICE(edev,
+ "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
+ txq->index,
+ qed_chain_get_cons_idx(&txq->tx_pbl),
+ qed_chain_get_prod_idx(&txq->tx_pbl));
+ break;
+ }
+
+ bytes_compl += len;
+ pkts_compl++;
+ txq->sw_tx_cons++;
+ }
+
+ netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
+}
+
+static void qede_empty_tx_queues(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i)
+ if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_fastpath *fp;
+
+ fp = &edev->fp_array[i];
+ qede_empty_tx_queue(edev,
+ &fp->txq[cos]);
+ }
+ }
+}
+
+/* This function inits fp content and resets the SB, RXQ and TXQ structures */
+static void qede_init_fp(struct qede_dev *edev)
+{
+ int queue_id, rxq_index = 0, txq_index = 0;
+ struct qede_fastpath *fp;
+ bool init_xdp = false;
+
+ for_each_queue(queue_id) {
+ fp = &edev->fp_array[queue_id];
+
+ fp->edev = edev;
+ fp->id = queue_id;
+
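+ /* Each XDP Tx queue is paired with the Rx queue of the same fastpath
+ * and derives its index from that Rx queue's index.
+ */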
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
+ rxq_index);
+ fp->xdp_tx->is_xdp = 1;
+
+ spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
+ init_xdp = true;
+ }
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ fp->rxq->rxq_id = rxq_index++;
+
+ /* Determine how to map buffers for this queue */
+ if (fp->type & QEDE_FASTPATH_XDP)
+ fp->rxq->data_direction = DMA_BIDIRECTIONAL;
+ else
+ fp->rxq->data_direction = DMA_FROM_DEVICE;
+ fp->rxq->dev = &edev->pdev->dev;
+
+ /* The driver has no error path from here */
+ WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
+ fp->rxq->rxq_id, 0) < 0);
+
+ if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
+ MEM_TYPE_PAGE_ORDER0,
+ NULL)) {
+ DP_NOTICE(edev,
+ "Failed to register XDP memory model\n");
+ }
+ }
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ struct qede_tx_queue *txq = &fp->txq[cos];
+ u16 ndev_tx_id;
+
+ txq->cos = cos;
+ txq->index = txq_index;
+ ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
+ txq->ndev_txq_id = ndev_tx_id;
+
+ if (edev->dev_info.is_legacy)
+ txq->is_legacy = true;
+ txq->dev = &edev->pdev->dev;
+ }
+
+ txq_index++;
+ }
+
+ snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+ edev->ndev->name, queue_id);
+ }
+
+ if (init_xdp) {
+ edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
+ DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
+ }
+}
+
+static int qede_set_real_num_queues(struct qede_dev *edev)
+{
+ int rc = 0;
+
+ rc = netif_set_real_num_tx_queues(edev->ndev,
+ QEDE_TSS_COUNT(edev) *
+ edev->dev_info.num_tc);
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
+ return rc;
+ }
+
+ rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
+ if (rc) {
+ DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
+ return rc;
+ }
+
+ return 0;
+}
+
+static void qede_napi_disable_remove(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_queue(i) {
+ napi_disable(&edev->fp_array[i].napi);
+
+ netif_napi_del(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_napi_add_enable(struct qede_dev *edev)
+{
+ int i;
+
+ /* Add NAPI objects */
+ for_each_queue(i) {
+ netif_napi_add(edev->ndev, &edev->fp_array[i].napi, qede_poll);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+static void qede_sync_free_irqs(struct qede_dev *edev)
+{
+ int i;
+
+ for (i = 0; i < edev->int_info.used_cnt; i++) {
+ if (edev->int_info.msix_cnt) {
+ free_irq(edev->int_info.msix[i].vector,
+ &edev->fp_array[i]);
+ } else {
+ edev->ops->common->simd_handler_clean(edev->cdev, i);
+ }
+ }
+
+ edev->int_info.used_cnt = 0;
+ edev->int_info.msix_cnt = 0;
+}
+
+static int qede_req_msix_irqs(struct qede_dev *edev)
+{
+ int i, rc;
+
+ /* Sanity check: the MSI-X vectors must cover all prepared RSS queues */
+ if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
+ DP_ERR(edev,
+ "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
+ QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
+#ifdef CONFIG_RFS_ACCEL
+ struct qede_fastpath *fp = &edev->fp_array[i];
+
+ if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
+ rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
+ edev->int_info.msix[i].vector);
+ if (rc) {
+ DP_ERR(edev, "Failed to add CPU rmap\n");
+ qede_free_arfs(edev);
+ }
+ }
+#endif
+ rc = request_irq(edev->int_info.msix[i].vector,
+ qede_msix_fp_int, 0, edev->fp_array[i].name,
+ &edev->fp_array[i]);
+ if (rc) {
+ DP_ERR(edev, "Request fp %d irq failed\n", i);
+#ifdef CONFIG_RFS_ACCEL
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+#endif
+ qede_sync_free_irqs(edev);
+ return rc;
+ }
+ DP_VERBOSE(edev, NETIF_MSG_INTR,
+ "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
+ edev->fp_array[i].name, i,
+ &edev->fp_array[i]);
+ edev->int_info.used_cnt++;
+ }
+
+ return 0;
+}
+
+static void qede_simd_fp_handler(void *cookie)
+{
+ struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
+
+ napi_schedule_irqoff(&fp->napi);
+}
+
+static int qede_setup_irqs(struct qede_dev *edev)
+{
+ int i, rc = 0;
+
+ /* Learn Interrupt configuration */
+ rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
+ if (rc)
+ return rc;
+
+ if (edev->int_info.msix_cnt) {
+ rc = qede_req_msix_irqs(edev);
+ if (rc)
+ return rc;
+ edev->ndev->irq = edev->int_info.msix[0].vector;
+ } else {
+ const struct qed_common_ops *ops;
+
+ /* qed should learn the RSS ids and callbacks */
+ ops = edev->ops->common;
+ for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
+ ops->simd_handler_config(edev->cdev,
+ &edev->fp_array[i], i,
+ qede_simd_fp_handler);
+ edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
+ }
+ return 0;
+}
+
+static int qede_drain_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, bool allow_drain)
+{
+ int rc, cnt = 1000;
+
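+ /* Poll for roughly one to two seconds (1000 iterations of 1-2 ms) for
+ * the queue to drain before optionally asking the MCP to drain it.
+ */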
+ while (txq->sw_tx_cons != txq->sw_tx_prod) {
+ if (!cnt) {
+ if (allow_drain) {
+ DP_NOTICE(edev,
+ "Tx queue[%d] is stuck, requesting MCP to drain\n",
+ txq->index);
+ rc = edev->ops->common->drain(edev->cdev);
+ if (rc)
+ return rc;
+ return qede_drain_txq(edev, txq, false);
+ }
+ DP_NOTICE(edev,
+ "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
+ txq->index, txq->sw_tx_prod,
+ txq->sw_tx_cons);
+ return -ENODEV;
+ }
+ cnt--;
+ usleep_range(1000, 2000);
+ barrier();
+ }
+
+ /* FW finished processing, wait for HW to transmit all tx packets */
+ usleep_range(1000, 2000);
+
+ return 0;
+}
+
+static int qede_stop_txq(struct qede_dev *edev,
+ struct qede_tx_queue *txq, int rss_id)
+{
+ /* delete doorbell from doorbell recovery mechanism */
+ edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db);
+
+ return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
+}
+
+static int qede_stop_queues(struct qede_dev *edev)
+{
+ struct qed_update_vport_params *vport_update_params;
+ struct qed_dev *cdev = edev->cdev;
+ struct qede_fastpath *fp;
+ int rc, i;
+
+ /* Disable the vport */
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params)
+ return -ENOMEM;
+
+ vport_update_params->vport_id = 0;
+ vport_update_params->update_vport_active_flg = 1;
+ vport_update_params->vport_active_flg = 0;
+ vport_update_params->update_rss_flg = 0;
+
+ rc = edev->ops->vport_update(cdev, vport_update_params);
+ vfree(vport_update_params);
+
+ if (rc) {
+ DP_ERR(edev, "Failed to update vport\n");
+ return rc;
+ }
+
+ /* Flush Tx queues. If needed, request drain from MCP */
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_drain_txq(edev, &fp->txq[cos], true);
+ if (rc)
+ return rc;
+ }
+ }
+
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_drain_txq(edev, fp->xdp_tx, true);
+ if (rc)
+ return rc;
+ }
+ }
+
+ /* Stop all Queues in reverse order */
+ for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
+ fp = &edev->fp_array[i];
+
+ /* Stop the Tx Queue(s) */
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_stop_txq(edev, &fp->txq[cos], i);
+ if (rc)
+ return rc;
+ }
+ }
+
+ /* Stop the Rx Queue */
+ if (fp->type & QEDE_FASTPATH_RX) {
+ rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
+ if (rc) {
+ DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
+ return rc;
+ }
+ }
+
+ /* Stop the XDP forwarding queue */
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_stop_txq(edev, fp->xdp_tx, i);
+ if (rc)
+ return rc;
+
+ bpf_prog_put(fp->rxq->xdp_prog);
+ }
+ }
+
+ /* Stop the vport */
+ rc = edev->ops->vport_stop(cdev, 0);
+ if (rc)
+ DP_ERR(edev, "Failed to stop VPORT\n");
+
+ return rc;
+}
+
+static int qede_start_txq(struct qede_dev *edev,
+ struct qede_fastpath *fp,
+ struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
+{
+ dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
+ u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
+ struct qed_queue_start_common_params params;
+ struct qed_txq_start_ret_params ret_params;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ memset(&ret_params, 0, sizeof(ret_params));
+
+ /* Let the XDP queue share the queue-zone with one of the regular txq.
+ * We don't really care about its coalescing.
+ */
+ if (txq->is_xdp)
+ params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
+ else
+ params.queue_id = txq->index;
+
+ params.p_sb = fp->sb_info;
+ params.sb_idx = sb_idx;
+ params.tc = txq->cos;
+
+ rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
+ page_cnt, &ret_params);
+ if (rc) {
+ DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
+ return rc;
+ }
+
+ txq->doorbell_addr = ret_params.p_doorbell;
+ txq->handle = ret_params.p_handle;
+
+ /* Determine the associated FW consumer address */
+ txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
+
+ /* Prepare the doorbell parameters */
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
+ SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
+ DQ_XCM_ETH_TX_BD_PROD_CMD);
+ txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
+
+ /* register doorbell with doorbell recovery mechanism */
+ rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
+ &txq->tx_db, DB_REC_WIDTH_32B,
+ DB_REC_KERNEL);
+
+ return rc;
+}
+
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
+{
+ int vlan_removal_en = 1;
+ struct qed_dev *cdev = edev->cdev;
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ struct qed_update_vport_params *vport_update_params;
+ struct qed_queue_start_common_params q_params;
+ struct qed_start_vport_params start = {0};
+ int rc, i;
+
+ if (!edev->num_queues) {
+ DP_ERR(edev,
+ "Cannot update V-VPORT as active as there are no Rx queues\n");
+ return -EINVAL;
+ }
+
+ vport_update_params = vzalloc(sizeof(*vport_update_params));
+ if (!vport_update_params)
+ return -ENOMEM;
+
+ start.handle_ptp_pkts = !!(edev->ptp);
+ start.gro_enable = !edev->gro_disable;
+ start.mtu = edev->ndev->mtu;
+ start.vport_id = 0;
+ start.drop_ttl0 = true;
+ start.remove_inner_vlan = vlan_removal_en;
+ start.clear_stats = clear_stats;
+
+ rc = edev->ops->vport_start(cdev, &start);
+
+ if (rc) {
+ DP_ERR(edev, "Start V-PORT failed %d\n", rc);
+ goto out;
+ }
+
+ DP_VERBOSE(edev, NETIF_MSG_IFUP,
+ "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
+ start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
+
+ for_each_queue(i) {
+ struct qede_fastpath *fp = &edev->fp_array[i];
+ dma_addr_t p_phys_table;
+ u32 page_cnt;
+
+ if (fp->type & QEDE_FASTPATH_RX) {
+ struct qed_rxq_start_ret_params ret_params;
+ struct qede_rx_queue *rxq = fp->rxq;
+ __le16 *val;
+
+ memset(&ret_params, 0, sizeof(ret_params));
+ memset(&q_params, 0, sizeof(q_params));
+ q_params.queue_id = rxq->rxq_id;
+ q_params.vport_id = 0;
+ q_params.p_sb = fp->sb_info;
+ q_params.sb_idx = RX_PI;
+
+ p_phys_table =
+ qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
+ page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
+
+ rc = edev->ops->q_rx_start(cdev, i, &q_params,
+ rxq->rx_buf_size,
+ rxq->rx_bd_ring.p_phys_addr,
+ p_phys_table,
+ page_cnt, &ret_params);
+ if (rc) {
+ DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
+ rc);
+ goto out;
+ }
+
+ /* Use the return parameters */
+ rxq->hw_rxq_prod_addr = ret_params.p_prod;
+ rxq->handle = ret_params.p_handle;
+
+ val = &fp->sb_info->sb_virt->pi_array[RX_PI];
+ rxq->hw_cons_ptr = val;
+
+ qede_update_rx_prod(edev, rxq);
+ }
+
+ if (fp->type & QEDE_FASTPATH_XDP) {
+ rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
+ if (rc)
+ goto out;
+
+ bpf_prog_add(edev->xdp_prog, 1);
+ fp->rxq->xdp_prog = edev->xdp_prog;
+ }
+
+ if (fp->type & QEDE_FASTPATH_TX) {
+ int cos;
+
+ for_each_cos_in_txq(edev, cos) {
+ rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
+ TX_PI(cos));
+ if (rc)
+ goto out;
+ }
+ }
+ }
+
+ /* Prepare and send the vport enable */
+ vport_update_params->vport_id = start.vport_id;
+ vport_update_params->update_vport_active_flg = 1;
+ vport_update_params->vport_active_flg = 1;
+
+ if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
+ qed_info->tx_switching) {
+ vport_update_params->update_tx_switching_flg = 1;
+ vport_update_params->tx_switching_flg = 1;
+ }
+
+ qede_fill_rss_params(edev, &vport_update_params->rss_params,
+ &vport_update_params->update_rss_flg);
+
+ rc = edev->ops->vport_update(cdev, vport_update_params);
+ if (rc)
+ DP_ERR(edev, "Update V-PORT failed %d\n", rc);
+
+out:
+ vfree(vport_update_params);
+ return rc;
+}
+
+enum qede_unload_mode {
+ QEDE_UNLOAD_NORMAL,
+ QEDE_UNLOAD_RECOVERY,
+};
+
+static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
+ bool is_locked)
+{
+ struct qed_link_params link_params;
+ int rc;
+
+ DP_INFO(edev, "Starting qede unload\n");
+
+ if (!is_locked)
+ __qede_lock(edev);
+
+ clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ edev->state = QEDE_STATE_CLOSED;
+
+ qede_rdma_dev_event_close(edev);
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ if (mode != QEDE_UNLOAD_RECOVERY) {
+ /* Reset the link */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = false;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ rc = qede_stop_queues(edev);
+ if (rc) {
+#ifdef CONFIG_RFS_ACCEL
+ if (edev->dev_info.common.b_arfs_capable) {
+ qede_poll_for_freeing_arfs_filters(edev);
+ if (edev->ndev->rx_cpu_rmap)
+ free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
+
+ edev->ndev->rx_cpu_rmap = NULL;
+ }
+#endif
+ qede_sync_free_irqs(edev);
+ goto out;
+ }
+
+ DP_INFO(edev, "Stopped Queues\n");
+ }
+
+ qede_vlan_mark_nonconfigured(edev);
+ edev->ops->fastpath_stop(edev->cdev);
+
+ if (edev->dev_info.common.b_arfs_capable) {
+ qede_poll_for_freeing_arfs_filters(edev);
+ qede_free_arfs(edev);
+ }
+
+ /* Release the interrupts */
+ qede_sync_free_irqs(edev);
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+
+ qede_napi_disable_remove(edev);
+
+ if (mode == QEDE_UNLOAD_RECOVERY)
+ qede_empty_tx_queues(edev);
+
+ qede_free_mem_load(edev);
+ qede_free_fp_array(edev);
+
+out:
+ if (!is_locked)
+ __qede_unlock(edev);
+
+ if (mode != QEDE_UNLOAD_RECOVERY)
+ DP_NOTICE(edev, "Link is down\n");
+
+ edev->ptp_skip_txts = 0;
+
+ DP_INFO(edev, "Ending qede unload\n");
+}
+
+enum qede_load_mode {
+ QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
+ QEDE_LOAD_RECOVERY,
+};
+
+static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
+ bool is_locked)
+{
+ struct qed_link_params link_params;
+ struct ethtool_coalesce coal = {};
+ u8 num_tc;
+ int rc, i;
+
+ DP_INFO(edev, "Starting qede load\n");
+
+ if (!is_locked)
+ __qede_lock(edev);
+
+ rc = qede_set_num_queues(edev);
+ if (rc)
+ goto out;
+
+ rc = qede_alloc_fp_array(edev);
+ if (rc)
+ goto out;
+
+ qede_init_fp(edev);
+
+ rc = qede_alloc_mem_load(edev);
+ if (rc)
+ goto err1;
+ DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
+ QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
+
+ rc = qede_set_real_num_queues(edev);
+ if (rc)
+ goto err2;
+
+ if (qede_alloc_arfs(edev)) {
+ edev->ndev->features &= ~NETIF_F_NTUPLE;
+ edev->dev_info.common.b_arfs_capable = false;
+ }
+
+ qede_napi_add_enable(edev);
+ DP_INFO(edev, "Napi added and enabled\n");
+
+ rc = qede_setup_irqs(edev);
+ if (rc)
+ goto err3;
+ DP_INFO(edev, "Setup IRQs succeeded\n");
+
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
+ if (rc)
+ goto err4;
+ DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
+
+ num_tc = netdev_get_num_tc(edev->ndev);
+ num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
+ qede_setup_tc(edev->ndev, num_tc);
+
+ /* Program un-configured VLANs */
+ qede_configure_vlan_filters(edev);
+
+ set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
+
+ /* Ask for link-up using current configuration */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ edev->state = QEDE_STATE_OPEN;
+
+ coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
+ coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
+
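+ /* Restore any per-queue coalescing values saved before the reload.
+ * The qede lock is released around qede_set_per_coalesce(), which is
+ * assumed to take that lock internally.
+ */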
+ for_each_queue(i) {
+ if (edev->coal_entry[i].isvalid) {
+ coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
+ coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
+ }
+ __qede_unlock(edev);
+ qede_set_per_coalesce(edev->ndev, i, &coal);
+ __qede_lock(edev);
+ }
+ DP_INFO(edev, "Ending successfully qede load\n");
+
+ goto out;
+err4:
+ qede_sync_free_irqs(edev);
+err3:
+ qede_napi_disable_remove(edev);
+err2:
+ qede_free_mem_load(edev);
+err1:
+ edev->ops->common->set_fp_int(edev->cdev, 0);
+ qede_free_fp_array(edev);
+ edev->num_queues = 0;
+ edev->fp_num_tx = 0;
+ edev->fp_num_rx = 0;
+out:
+ if (!is_locked)
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+/* 'func' should be able to run between unload and reload assuming interface
+ * is actually running, or afterwards in case it's currently DOWN.
+ */
+void qede_reload(struct qede_dev *edev,
+ struct qede_reload_args *args, bool is_locked)
+{
+ if (!is_locked)
+ __qede_lock(edev);
+
+ /* Since qede_lock is held, internal state wouldn't change even
+ * if netdev state would start transitioning. Check whether current
+ * internal configuration indicates device is up, then reload.
+ */
+ if (edev->state == QEDE_STATE_OPEN) {
+ qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
+ if (args)
+ args->func(edev, args);
+ qede_load(edev, QEDE_LOAD_RELOAD, true);
+
+ /* Since no one is going to do it for us, re-configure */
+ qede_config_rx_mode(edev->ndev);
+ } else if (args) {
+ args->func(edev, args);
+ }
+
+ if (!is_locked)
+ __qede_unlock(edev);
+}
+
+/* called with rtnl_lock */
+static int qede_open(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+ int rc;
+
+ netif_carrier_off(ndev);
+
+ edev->ops->common->set_power_state(edev->cdev, PCI_D0);
+
+ rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
+ if (rc)
+ return rc;
+
+ udp_tunnel_nic_reset_ntf(ndev);
+
+ edev->ops->common->update_drv_state(edev->cdev, true);
+
+ return 0;
+}
+
+static int qede_close(struct net_device *ndev)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
+
+ if (edev->cdev)
+ edev->ops->common->update_drv_state(edev->cdev, false);
+
+ return 0;
+}
+
+static void qede_link_update(void *dev, struct qed_link_output *link)
+{
+ struct qede_dev *edev = dev;
+
+ if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
+ DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
+ return;
+ }
+
+ if (link->link_up) {
+ if (!netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is up\n");
+ netif_tx_start_all_queues(edev->ndev);
+ netif_carrier_on(edev->ndev);
+ qede_rdma_dev_event_open(edev);
+ }
+ } else {
+ if (netif_carrier_ok(edev->ndev)) {
+ DP_NOTICE(edev, "Link is down\n");
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+ qede_rdma_dev_event_close(edev);
+ }
+ }
+}
+
+static void qede_schedule_recovery_handler(void *dev)
+{
+ struct qede_dev *edev = dev;
+
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev,
+ "Avoid scheduling a recovery handling since already in recovery state\n");
+ return;
+ }
+
+ set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a recovery handler\n");
+}
+
+static void qede_recovery_failed(struct qede_dev *edev)
+{
+ netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
+
+ netif_device_detach(edev->ndev);
+
+ if (edev->cdev)
+ edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
+}
+
+static void qede_recovery_handler(struct qede_dev *edev)
+{
+ u32 curr_state = edev->state;
+ int rc;
+
+ DP_NOTICE(edev, "Starting a recovery process\n");
+
+ /* No need to acquire the qede_lock first since this is done by
+ * qede_sp_task before calling this function.
+ */
+ edev->state = QEDE_STATE_RECOVERY;
+
+ edev->ops->common->recovery_prolog(edev->cdev);
+
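+ /* Tear down and re-probe the qed device in recovery mode; the netdev
+ * itself is preserved across __qede_remove()/__qede_probe().
+ */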
+ if (curr_state == QEDE_STATE_OPEN)
+ qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
+
+ __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
+
+ rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
+ IS_VF(edev), QEDE_PROBE_RECOVERY);
+ if (rc) {
+ edev->cdev = NULL;
+ goto err;
+ }
+
+ if (curr_state == QEDE_STATE_OPEN) {
+ rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
+ if (rc)
+ goto err;
+
+ qede_config_rx_mode(edev->ndev);
+ udp_tunnel_nic_reset_ntf(edev->ndev);
+ }
+
+ edev->state = curr_state;
+
+ DP_NOTICE(edev, "Recovery handling is done\n");
+
+ return;
+
+err:
+ qede_recovery_failed(edev);
+}
+
+static void qede_atomic_hw_err_handler(struct qede_dev *edev)
+{
+ struct qed_dev *cdev = edev->cdev;
+
+ DP_NOTICE(edev,
+ "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ /* Get a call trace of the flow that led to the error */
+ WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
+
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
+ edev->ops->common->attn_clr_enable(cdev, true);
+
+ DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
+}
+
+static void qede_generic_hw_err_handler(struct qede_dev *edev)
+{
+ DP_NOTICE(edev,
+ "Generic sleepable HW error handling started - err_flags 0x%lx\n",
+ edev->err_flags);
+
+ if (edev->devlink) {
+ DP_NOTICE(edev, "Reporting fatal error to devlink\n");
+ edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
+ }
+
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+
+ DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
+}
+
+static void qede_set_hw_err_flags(struct qede_dev *edev,
+ enum qed_hw_err_type err_type)
+{
+ unsigned long err_flags = 0;
+
+ switch (err_type) {
+ case QED_HW_ERR_DMAE_FAIL:
+ set_bit(QEDE_ERR_WARN, &err_flags);
+ fallthrough;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
+ set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
+ /* Mark this error as recoverable and start recovery */
+ set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
+ break;
+
+ default:
+ DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
+ break;
+ }
+
+ edev->err_flags |= err_flags;
+}
+
+static void qede_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qede_dev *edev = dev;
+
+ /* Fan failure cannot be masked by handling of another HW error or by a
+ * concurrent recovery process.
+ */
+ if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
+ edev->state == QEDE_STATE_RECOVERY) &&
+ err_type != QED_HW_ERR_FAN_FAIL) {
+ DP_INFO(edev,
+ "Avoid scheduling an error handling while another HW error is being handled\n");
+ return;
+ }
+
+ if (err_type >= QED_HW_ERR_LAST) {
+ DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
+ clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
+ return;
+ }
+
+ edev->last_err_type = err_type;
+ qede_set_hw_err_flags(edev, err_type);
+ qede_atomic_hw_err_handler(edev);
+ set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ DP_INFO(edev, "Scheduled a error handler [err_type %d]\n", err_type);
+}
+
+static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+ struct netdev_queue *netdev_txq;
+
+ netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
+ if (netif_xmit_stopped(netdev_txq))
+ return true;
+
+ return false;
+}
+
+static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+ struct qede_dev *edev = dev;
+ struct netdev_hw_addr *ha;
+ int i;
+
+ if (edev->ndev->features & NETIF_F_IP_CSUM)
+ data->feat_flags |= QED_TLV_IP_CSUM;
+ if (edev->ndev->features & NETIF_F_TSO)
+ data->feat_flags |= QED_TLV_LSO;
+
+ ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
+ eth_zero_addr(data->mac[1]);
+ eth_zero_addr(data->mac[2]);
+ /* Copy the first two UC macs */
+ netif_addr_lock_bh(edev->ndev);
+ i = 1;
+ netdev_for_each_uc_addr(ha, edev->ndev) {
+ ether_addr_copy(data->mac[i++], ha->addr);
+ if (i == QED_TLV_MAC_COUNT)
+ break;
+ }
+
+ netif_addr_unlock_bh(edev->ndev);
+}
+
+static void qede_get_eth_tlv_data(void *dev, void *data)
+{
+ struct qed_mfw_tlv_eth *etlv = data;
+ struct qede_dev *edev = dev;
+ struct qede_fastpath *fp;
+ int i;
+
+ etlv->lso_maxoff_size = 0XFFFF;
+ etlv->lso_maxoff_size_set = true;
+ etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
+ etlv->lso_minseg_size_set = true;
+ etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
+ etlv->prom_mode_set = true;
+ etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
+ etlv->tx_descr_size_set = true;
+ etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
+ etlv->rx_descr_size_set = true;
+ etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
+ etlv->iov_offload_set = true;
+
+ /* Fill information regarding queues; Should be done under the qede
+ * lock to guarantee those don't change beneath our feet.
+ */
+ etlv->txqs_empty = true;
+ etlv->rxqs_empty = true;
+ etlv->num_txqs_full = 0;
+ etlv->num_rxqs_full = 0;
+
+ __qede_lock(edev);
+ for_each_queue(i) {
+ fp = &edev->fp_array[i];
+ if (fp->type & QEDE_FASTPATH_TX) {
+ struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
+
+ if (txq->sw_tx_cons != txq->sw_tx_prod)
+ etlv->txqs_empty = false;
+ if (qede_is_txq_full(edev, txq))
+ etlv->num_txqs_full++;
+ }
+ if (fp->type & QEDE_FASTPATH_RX) {
+ if (qede_has_rx_work(fp->rxq))
+ etlv->rxqs_empty = false;
+
+ /* This one is a bit tricky; Firmware might stop
+ * placing packets if ring is not yet full.
+ * Give an approximation.
+ */
+ if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
+ qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
+ RX_RING_SIZE - 100)
+ etlv->num_rxqs_full++;
+ }
+ }
+ __qede_unlock(edev);
+
+ etlv->txqs_empty_set = true;
+ etlv->rxqs_empty_set = true;
+ etlv->num_txqs_full_set = true;
+ etlv->num_rxqs_full_set = true;
+}
+
+/**
+ * qede_io_error_detected(): Called when PCI error is detected
+ *
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * Return: pci_ers_result_t.
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev)
+ return PCI_ERS_RESULT_NONE;
+
+ DP_NOTICE(edev, "IO error detected [%d]\n", state);
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_RECOVERY) {
+ DP_NOTICE(edev, "Device already in the recovery state\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ /* PF handles the recovery of its VFs */
+ if (IS_VF(edev)) {
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "VF recovery is handled by its PF\n");
+ __qede_unlock(edev);
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ /* Close OS Tx */
+ netif_tx_disable(edev->ndev);
+ netif_carrier_off(edev->ndev);
+
+ set_bit(QEDE_SP_AER, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+
+ __qede_unlock(edev);
+
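+ /* The actual recovery is carried out asynchronously by qede_sp_task()
+ * once it observes the QEDE_SP_AER flag set above.
+ */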
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
new file mode 100644
index 000000000..c9c8225f0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include "qede_ptp.h"
+#define QEDE_PTP_TX_TIMEOUT (2 * HZ)
+
+struct qede_ptp {
+ const struct qed_eth_ptp_ops *ops;
+ struct ptp_clock_info clock_info;
+ struct cyclecounter cc;
+ struct timecounter tc;
+ struct ptp_clock *clock;
+ struct work_struct work;
+ unsigned long ptp_tx_start;
+ struct qede_dev *edev;
+ struct sk_buff *tx_skb;
+
+ /* ptp spinlock is used for protecting the cycle/time counter fields
+ * and also for serializing the qed PTP API invocations.
+ */
+ spinlock_t lock;
+ bool hw_ts_ioctl_called;
+ u16 tx_type;
+ u16 rx_filter;
+};
+
+/**
+ * qede_ptp_adjfreq() - Adjust the frequency of the PTP cycle counter.
+ *
+ * @info: The PTP clock info structure.
+ * @ppb: Parts per billion adjustment from base.
+ *
+ * Return: Zero on success, negative errno otherwise.
+ */
+static int qede_ptp_adjfreq(struct ptp_clock_info *info, s32 ppb)
+{
+ struct qede_ptp *ptp = container_of(info, struct qede_ptp, clock_info);
+ struct qede_dev *edev = ptp->edev;
+ int rc;
+
+ __qede_lock(edev);
+ if (edev->state == QEDE_STATE_OPEN) {
+ spin_lock_bh(&ptp->lock);
+ rc = ptp->ops->adjfreq(edev->cdev, ppb);
+ spin_unlock_bh(&ptp->lock);
+ } else {
+ DP_ERR(edev, "PTP adjfreq called while interface is down\n");
+ rc = -EFAULT;
+ }
+ __qede_unlock(edev);
+
+ return rc;
+}
+
+static int qede_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP adjtime called, delta = %llx\n",
+ delta);
+
+ spin_lock_bh(&ptp->lock);
+ timecounter_adjtime(&ptp->tc, delta);
+ spin_unlock_bh(&ptp->lock);
+
+ return 0;
+}
+
+static int qede_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 ns;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ spin_lock_bh(&ptp->lock);
+ ns = timecounter_read(&ptp->tc);
+ spin_unlock_bh(&ptp->lock);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP gettime called, ns = %llu\n", ns);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int qede_ptp_settime(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 ns;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ ns = timespec64_to_ns(ts);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PTP settime called, ns = %llu\n", ns);
+
+ /* Re-init the timecounter */
+ spin_lock_bh(&ptp->lock);
+ timecounter_init(&ptp->tc, &ptp->cc, ns);
+ spin_unlock_bh(&ptp->lock);
+
+ return 0;
+}
+
+/* Enable (or disable) ancillary features of the phc subsystem */
+static int qede_ptp_ancillary_feature_enable(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+
+ ptp = container_of(info, struct qede_ptp, clock_info);
+ edev = ptp->edev;
+
+ DP_ERR(edev, "PHC ancillary features are not supported\n");
+
+ return -ENOTSUPP;
+}
+
+static void qede_ptp_task(struct work_struct *work)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 timestamp, ns;
+ bool timedout;
+ int rc;
+
+ ptp = container_of(work, struct qede_ptp, work);
+ edev = ptp->edev;
+ timedout = time_is_before_jiffies(ptp->ptp_tx_start +
+ QEDE_PTP_TX_TIMEOUT);
+
+ /* Read Tx timestamp registers */
+ spin_lock_bh(&ptp->lock);
+ rc = ptp->ops->read_tx_ts(edev->cdev, &timestamp);
+ spin_unlock_bh(&ptp->lock);
+ if (rc) {
+ if (unlikely(timedout)) {
+ DP_INFO(edev, "Tx timestamp is not recorded\n");
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ &edev->flags);
+ edev->ptp_skip_txts++;
+ } else {
+ /* Reschedule to keep checking for a valid TS value */
+ schedule_work(&ptp->work);
+ }
+ return;
+ }
+
+ ns = timecounter_cyc2time(&ptp->tc, timestamp);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(ptp->tx_skb, &shhwtstamps);
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
+
+/* Read the PHC. This API is invoked with ptp_lock held. */
+static u64 qede_ptp_read_cc(const struct cyclecounter *cc)
+{
+ struct qede_dev *edev;
+ struct qede_ptp *ptp;
+ u64 phc_cycles;
+ int rc;
+
+ ptp = container_of(cc, struct qede_ptp, cc);
+ edev = ptp->edev;
+ rc = ptp->ops->read_cc(edev->cdev, &phc_cycles);
+ if (rc)
+ WARN_ONCE(1, "PHC read err %d\n", rc);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "PHC read cycles = %llu\n", phc_cycles);
+
+ return phc_cycles;
+}
+
+static int qede_ptp_cfg_filters(struct qede_dev *edev)
+{
+ enum qed_ptp_hwtstamp_tx_type tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ enum qed_ptp_filter_type rx_filter = QED_PTP_FILTER_NONE;
+ struct qede_ptp *ptp = edev->ptp;
+
+ if (!ptp)
+ return -EIO;
+
+ if (!ptp->hw_ts_ioctl_called) {
+ DP_INFO(edev, "TS IOCTL not called\n");
+ return 0;
+ }
+
+ switch (ptp->tx_type) {
+ case HWTSTAMP_TX_ON:
+ set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
+ tx_type = QED_PTP_HWTSTAMP_TX_ON;
+ break;
+
+ case HWTSTAMP_TX_OFF:
+ clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags);
+ tx_type = QED_PTP_HWTSTAMP_TX_OFF;
+ break;
+
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ DP_ERR(edev, "One-step timestamping is not supported\n");
+ return -ERANGE;
+ }
+
+ spin_lock_bh(&ptp->lock);
+ switch (ptp->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ rx_filter = QED_PTP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ ptp->rx_filter = HWTSTAMP_FILTER_NONE;
+ rx_filter = QED_PTP_FILTER_ALL;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 events */
+ rx_filter = QED_PTP_FILTER_V1_L4_GEN;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ /* Initialize PTP detection for UDP/IPv4 or UDP/IPv6 events */
+ rx_filter = QED_PTP_FILTER_V2_L4_GEN;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_L2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ /* Initialize PTP detection L2 events */
+ rx_filter = QED_PTP_FILTER_V2_L2_GEN;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ rx_filter = QED_PTP_FILTER_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ /* Initialize PTP detection L2, UDP/IPv4 or UDP/IPv6 events */
+ rx_filter = QED_PTP_FILTER_V2_GEN;
+ break;
+ }
+
+ ptp->ops->cfg_filters(edev->cdev, rx_filter, tx_type);
+
+ spin_unlock_bh(&ptp->lock);
+
+ return 0;
+}
+
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EIO;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "HWTSTAMP IOCTL: Requested tx_type = %d, requested rx_filters = %d\n",
+ config.tx_type, config.rx_filter);
+
+ ptp->hw_ts_ioctl_called = 1;
+ ptp->tx_type = config.tx_type;
+ ptp->rx_filter = config.rx_filter;
+
+ rc = qede_ptp_cfg_filters(edev);
+ if (rc)
+ return rc;
+
+ config.rx_filter = ptp->rx_filter;
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+{
+ struct qede_ptp *ptp = edev->ptp;
+
+ if (!ptp) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+
+ return 0;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (ptp->clock)
+ info->phc_index = ptp_clock_index(ptp->clock);
+ else
+ info->phc_index = -1;
+
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ return 0;
+}
+
+void qede_ptp_disable(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ if (ptp->clock) {
+ ptp_clock_unregister(ptp->clock);
+ ptp->clock = NULL;
+ }
+
+ /* Cancel PTP work queue. Should be done after the Tx queues are
+ * drained to prevent additional scheduling.
+ */
+ cancel_work_sync(&ptp->work);
+ if (ptp->tx_skb) {
+ dev_kfree_skb_any(ptp->tx_skb);
+ ptp->tx_skb = NULL;
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ }
+
+ /* Disable PTP in HW */
+ spin_lock_bh(&ptp->lock);
+ ptp->ops->disable(edev->cdev);
+ spin_unlock_bh(&ptp->lock);
+
+ kfree(ptp);
+ edev->ptp = NULL;
+}
+
+static int qede_ptp_init(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return -EINVAL;
+
+ spin_lock_init(&ptp->lock);
+
+ /* Configure PTP in HW */
+ rc = ptp->ops->enable(edev->cdev);
+ if (rc) {
+ DP_INFO(edev, "PTP HW enable failed\n");
+ return rc;
+ }
+
+ /* Init work queue for Tx timestamping */
+ INIT_WORK(&ptp->work, qede_ptp_task);
+
+ /* Init cyclecounter and timecounter */
+ memset(&ptp->cc, 0, sizeof(ptp->cc));
+ ptp->cc.read = qede_ptp_read_cc;
+ ptp->cc.mask = CYCLECOUNTER_MASK(64);
+ ptp->cc.shift = 0;
+ ptp->cc.mult = 1;
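+ /* With mult = 1 and shift = 0 the timecounter treats the raw PHC
+ * cycle count as nanoseconds, i.e. no scaling is applied.
+ */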
+
+ timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real()));
+
+ return 0;
+}
+
+int qede_ptp_enable(struct qede_dev *edev)
+{
+ struct qede_ptp *ptp;
+ int rc;
+
+ ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ DP_INFO(edev, "Failed to allocate struct for PTP\n");
+ return -ENOMEM;
+ }
+
+ ptp->edev = edev;
+ ptp->ops = edev->ops->ptp;
+ if (!ptp->ops) {
+ DP_INFO(edev, "PTP enable failed\n");
+ rc = -EIO;
+ goto err1;
+ }
+
+ edev->ptp = ptp;
+
+ rc = qede_ptp_init(edev);
+ if (rc)
+ goto err1;
+
+ qede_ptp_cfg_filters(edev);
+
+ /* Fill the ptp_clock_info struct and register PTP clock */
+ ptp->clock_info.owner = THIS_MODULE;
+ snprintf(ptp->clock_info.name, 16, "%s", edev->ndev->name);
+ ptp->clock_info.max_adj = QED_MAX_PHC_DRIFT_PPB;
+ ptp->clock_info.n_alarm = 0;
+ ptp->clock_info.n_ext_ts = 0;
+ ptp->clock_info.n_per_out = 0;
+ ptp->clock_info.pps = 0;
+ ptp->clock_info.adjfreq = qede_ptp_adjfreq;
+ ptp->clock_info.adjtime = qede_ptp_adjtime;
+ ptp->clock_info.gettime64 = qede_ptp_gettime;
+ ptp->clock_info.settime64 = qede_ptp_settime;
+ ptp->clock_info.enable = qede_ptp_ancillary_feature_enable;
+
+ ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
+ if (IS_ERR(ptp->clock)) {
+ DP_ERR(edev, "PTP clock registration failed\n");
+ qede_ptp_disable(edev);
+ rc = -EINVAL;
+ goto err2;
+ }
+
+ return 0;
+
+err1:
+ kfree(ptp);
+err2:
+ edev->ptp = NULL;
+
+ return rc;
+}
+
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+ struct qede_ptp *ptp;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS,
+ &edev->flags)) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Timestamping in progress\n");
+ edev->ptp_skip_txts++;
+ return;
+ }
+
+ if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Tx timestamping was not enabled, this pkt will not be timestamped\n");
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ edev->ptp_skip_txts++;
+ } else if (unlikely(ptp->tx_skb)) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Device supports a single outstanding pkt to ts, It will not be ts\n");
+ clear_bit_unlock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags);
+ edev->ptp_skip_txts++;
+ } else {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* schedule check for Tx timestamp */
+ ptp->tx_skb = skb_get(skb);
+ ptp->ptp_tx_start = jiffies;
+ schedule_work(&ptp->work);
+ }
+}
+
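+/* Read the Rx timestamp latched by hardware, convert it to nanoseconds via
+ * the timecounter and store the result in the skb's hardware timestamp.
+ */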
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb)
+{
+ struct qede_ptp *ptp;
+ u64 timestamp, ns;
+ int rc;
+
+ ptp = edev->ptp;
+ if (!ptp)
+ return;
+
+ spin_lock_bh(&ptp->lock);
+ rc = ptp->ops->read_rx_ts(edev->cdev, &timestamp);
+ if (rc) {
+ spin_unlock_bh(&ptp->lock);
+ DP_INFO(edev, "Invalid Rx timestamp\n");
+ return;
+ }
+
+ ns = timecounter_cyc2time(&ptp->tc, timestamp);
+ spin_unlock_bh(&ptp->lock);
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
+ timestamp, ns);
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
new file mode 100644
index 000000000..1db0f021c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#ifndef _QEDE_PTP_H_
+#define _QEDE_PTP_H_
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/timecounter.h>
+#include "qede.h"
+
+void qede_ptp_rx_ts(struct qede_dev *edev, struct sk_buff *skb);
+void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
+int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
+void qede_ptp_disable(struct qede_dev *edev);
+int qede_ptp_enable(struct qede_dev *edev);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+
+static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
+ union eth_rx_cqe *cqe,
+ struct sk_buff *skb)
+{
+ /* Check if this packet was timestamped */
+ if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &
+ (1 << PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT))) {
+ if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)
+ & (1 << PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT))) {
+ qede_ptp_rx_ts(edev, skb);
+ } else {
+ DP_INFO(edev,
+ "Timestamp recorded for non PTP packets\n");
+ }
+ }
+}
+#endif /* _QEDE_PTP_H_ */
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
new file mode 100644
index 000000000..6304514a6
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2017 QLogic Corporation
+ * Copyright (c) 2019-2020 Marvell International Ltd.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/qed/qede_rdma.h>
+#include "qede.h"
+
+static struct qedr_driver *qedr_drv;
+static LIST_HEAD(qedr_dev_list);
+static DEFINE_MUTEX(qedr_dev_list_lock);
+
+bool qede_rdma_supported(struct qede_dev *dev)
+{
+ return dev->dev_info.common.rdma_supported;
+}
+
+static void _qede_rdma_dev_add(struct qede_dev *edev)
+{
+ if (!qedr_drv)
+ return;
+
+ /* Leftovers from previous error recovery */
+ edev->rdma_info.exp_recovery = false;
+ edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
+ edev->ndev);
+}
+
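+/* Set up the event list, its refcount/completion and the single-threaded
+ * workqueue used to hand link and configuration change events to the
+ * registered RDMA (qedr) driver.
+ */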
+static int qede_rdma_create_wq(struct qede_dev *edev)
+{
+ INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
+ kref_init(&edev->rdma_info.refcnt);
+ init_completion(&edev->rdma_info.event_comp);
+
+ edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+ if (!edev->rdma_info.rdma_wq) {
+ DP_NOTICE(edev, "qedr: Could not create workqueue\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void qede_rdma_cleanup_event(struct qede_dev *edev)
+{
+ struct list_head *head = &edev->rdma_info.rdma_event_list;
+ struct qede_rdma_event_work *event_node;
+
+ flush_workqueue(edev->rdma_info.rdma_wq);
+ while (!list_empty(head)) {
+ event_node = list_entry(head->next, struct qede_rdma_event_work,
+ list);
+ cancel_work_sync(&event_node->work);
+ list_del(&event_node->list);
+ kfree(event_node);
+ }
+}
+
+static void qede_rdma_complete_event(struct kref *ref)
+{
+ struct qede_rdma_dev *rdma_dev =
+ container_of(ref, struct qede_rdma_dev, refcnt);
+
+ /* no more events will be added after this */
+ complete(&rdma_dev->event_comp);
+}
+
+static void qede_rdma_destroy_wq(struct qede_dev *edev)
+{
+ /* Avoid race with add_event flow, make sure it finishes before
+ * we start accessing the list and cleaning up the work
+ */
+ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
+ wait_for_completion(&edev->rdma_info.event_comp);
+
+ qede_rdma_cleanup_event(edev);
+ destroy_workqueue(edev->rdma_info.rdma_wq);
+ edev->rdma_info.rdma_wq = NULL;
+}
+
+int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
+{
+ int rc;
+
+ if (!qede_rdma_supported(edev))
+ return 0;
+
+ /* Cannot start qedr while recovering since it wasn't fully stopped */
+ if (recovery)
+ return 0;
+
+ rc = qede_rdma_create_wq(edev);
+ if (rc)
+ return rc;
+
+ INIT_LIST_HEAD(&edev->rdma_info.entry);
+ mutex_lock(&qedr_dev_list_lock);
+ list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
+ _qede_rdma_dev_add(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+
+ return rc;
+}
+
+static void _qede_rdma_dev_remove(struct qede_dev *edev)
+{
+ if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
+ qedr_drv->remove(edev->rdma_info.qedr_dev);
+}
+
+void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ /* Cannot remove qedr while recovering since it wasn't fully stopped */
+ if (!recovery) {
+ qede_rdma_destroy_wq(edev);
+ mutex_lock(&qedr_dev_list_lock);
+ if (!edev->rdma_info.exp_recovery)
+ _qede_rdma_dev_remove(edev);
+ edev->rdma_info.qedr_dev = NULL;
+ list_del(&edev->rdma_info.entry);
+ mutex_unlock(&qedr_dev_list_lock);
+ } else {
+ if (!edev->rdma_info.exp_recovery) {
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_remove(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+ }
+ edev->rdma_info.exp_recovery = true;
+ }
+}
+
+static void _qede_rdma_dev_open(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
+}
+
+static void qede_rdma_dev_open(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_open(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void _qede_rdma_dev_close(struct qede_dev *edev)
+{
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
+}
+
+static void qede_rdma_dev_close(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ _qede_rdma_dev_close(edev);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
+static void qede_rdma_dev_shutdown(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
+ mutex_unlock(&qedr_dev_list_lock);
+}
+
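+/* Register an RDMA driver: record its ops and attach it to every qede
+ * device discovered so far, notifying QEDE_UP for interfaces that are
+ * already running.
+ */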
+int qede_rdma_register_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+ u8 qedr_counter = 0;
+
+ mutex_lock(&qedr_dev_list_lock);
+ if (qedr_drv) {
+ mutex_unlock(&qedr_dev_list_lock);
+ return -EINVAL;
+ }
+ qedr_drv = drv;
+
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ struct net_device *ndev;
+
+ qedr_counter++;
+ _qede_rdma_dev_add(edev);
+ ndev = edev->ndev;
+ if (netif_running(ndev) && netif_oper_up(ndev))
+ _qede_rdma_dev_open(edev);
+ }
+ mutex_unlock(&qedr_dev_list_lock);
+
+ pr_notice("qedr: discovered and registered %d RDMA funcs\n",
+ qedr_counter);
+
+ return 0;
+}
+EXPORT_SYMBOL(qede_rdma_register_driver);
+
+void qede_rdma_unregister_driver(struct qedr_driver *drv)
+{
+ struct qede_dev *edev;
+
+ mutex_lock(&qedr_dev_list_lock);
+ list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
+ /* If device has experienced recovery it was already removed */
+ if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
+ _qede_rdma_dev_remove(edev);
+ }
+ qedr_drv = NULL;
+ mutex_unlock(&qedr_dev_list_lock);
+}
+EXPORT_SYMBOL(qede_rdma_unregister_driver);
+
+static void qede_rdma_changeaddr(struct qede_dev *edev)
+{
+ if (!qede_rdma_supported(edev))
+ return;
+
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
+}
+
+static void qede_rdma_change_mtu(struct qede_dev *edev)
+{
+ if (qede_rdma_supported(edev)) {
+ if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
+ qedr_drv->notify(edev->rdma_info.qedr_dev,
+ QEDE_CHANGE_MTU);
+ }
+}
+
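+/* Reuse an event work node whose work is no longer pending, or atomically
+ * allocate a new one and add it to the event list.
+ */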
+static struct qede_rdma_event_work *
+qede_rdma_get_free_event_node(struct qede_dev *edev)
+{
+ struct qede_rdma_event_work *event_node = NULL;
+ bool found = false;
+
+ list_for_each_entry(event_node, &edev->rdma_info.rdma_event_list,
+ list) {
+ if (!work_pending(&event_node->work)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
+ if (!event_node) {
+ DP_NOTICE(edev,
+ "qedr: Could not allocate memory for rdma work\n");
+ return NULL;
+ }
+ list_add_tail(&event_node->list,
+ &edev->rdma_info.rdma_event_list);
+ }
+
+ return event_node;
+}
+
+static void qede_rdma_handle_event(struct work_struct *work)
+{
+ struct qede_rdma_event_work *event_node;
+ enum qede_rdma_event event;
+ struct qede_dev *edev;
+
+ event_node = container_of(work, struct qede_rdma_event_work, work);
+ event = event_node->event;
+ edev = event_node->ptr;
+
+ switch (event) {
+ case QEDE_UP:
+ qede_rdma_dev_open(edev);
+ break;
+ case QEDE_DOWN:
+ qede_rdma_dev_close(edev);
+ break;
+ case QEDE_CLOSE:
+ qede_rdma_dev_shutdown(edev);
+ break;
+ case QEDE_CHANGE_ADDR:
+ qede_rdma_changeaddr(edev);
+ break;
+ case QEDE_CHANGE_MTU:
+ qede_rdma_change_mtu(edev);
+ break;
+ default:
+ DP_NOTICE(edev, "Invalid rdma event %d", event);
+ }
+}
+
+static void qede_rdma_add_event(struct qede_dev *edev,
+ enum qede_rdma_event event)
+{
+ struct qede_rdma_event_work *event_node;
+
+ /* If a recovery was experienced avoid adding the event */
+ if (edev->rdma_info.exp_recovery)
+ return;
+
+ if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
+ return;
+
+ /* We don't want the cleanup flow to start while we're allocating and
+ * scheduling the work
+ */
+ if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
+ return; /* already being destroyed */
+
+ event_node = qede_rdma_get_free_event_node(edev);
+ if (!event_node)
+ goto out;
+
+ event_node->event = event;
+ event_node->ptr = edev;
+
+ INIT_WORK(&event_node->work, qede_rdma_handle_event);
+ queue_work(edev->rdma_info.rdma_wq, &event_node->work);
+
+out:
+ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
+}
+
+void qede_rdma_dev_event_open(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_UP);
+}
+
+void qede_rdma_dev_event_close(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_DOWN);
+}
+
+void qede_rdma_event_changeaddr(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
+}
+
+void qede_rdma_event_change_mtu(struct qede_dev *edev)
+{
+ qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
+}
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
new file mode 100644
index 000000000..fc78bc959
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -0,0 +1,3930 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/prefetch.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME "qla3xxx"
+#define DRV_STRING "QLogic ISP3XXX Network Driver"
+#define DRV_VERSION "v2.03.00-k5"
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+#define TIMED_OUT_MSG \
+"Timed out waiting for management port to get free before issuing command\n"
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+ = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+ | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static const struct pci_device_id ql3xxx_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * These are the known PHYs which are used
+ */
+enum PHY_DEVICE_TYPE {
+ PHY_TYPE_UNKNOWN = 0,
+ PHY_VITESSE_VSC8211,
+ PHY_AGERE_ET1011C,
+ MAX_PHY_DEV_TYPES
+};
+
+struct PHY_DEVICE_INFO {
+ const enum PHY_DEVICE_TYPE phyDevice;
+ const u32 phyIdOUI;
+ const u16 phyIdModel;
+ const char *name;
+};
+
+static const struct PHY_DEVICE_INFO PHY_DEVICES[] = {
+ {PHY_TYPE_UNKNOWN, 0x000000, 0x0, "PHY_TYPE_UNKNOWN"},
+ {PHY_VITESSE_VSC8211, 0x0003f1, 0xb, "PHY_VITESSE_VSC8211"},
+ {PHY_AGERE_ET1011C, 0x00a0bc, 0x1, "PHY_AGERE_ET1011C"},
+};
+
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+ u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+ unsigned int seconds = 3;
+
+ do {
+ writel((sem_mask | sem_bits),
+ &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ if ((value & (sem_mask >> 16)) == sem_bits)
+ return 0;
+ mdelay(1000);
+ } while (--seconds);
+ return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+ readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+ value = readl(&port_regs->CommonRegs.semaphoreReg);
+ return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+ int i = 0;
+
+ do {
+ if (ql_sem_lock(qdev,
+ QL_DRVR_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 1)) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "driver lock acquired\n");
+ return 1;
+ }
+ mdelay(1000);
+ } while (++i < 10);
+
+ netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
+ return 0;
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ writel(((ISP_CONTROL_NP_MASK << 16) | page),
+ &port_regs->CommonRegs.ispControlStatus);
+ readl(&port_regs->CommonRegs.ispControlStatus);
+ qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ value = readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ u32 value;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ value = readl(reg);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ writel(value, reg);
+ readl(reg);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ writel(value, reg);
+ readl(reg);
+}
+
+static void ql_write_nvram_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ writel(value, reg);
+ readl(reg);
+ udelay(1);
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 0)
+ ql_set_register_page(qdev, 0);
+ writel(value, reg);
+ readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 1)
+ ql_set_register_page(qdev, 1);
+ writel(value, reg);
+ readl(reg);
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+ u32 __iomem *reg, u32 value)
+{
+ if (qdev->current_page != 2)
+ ql_set_register_page(qdev, 2);
+ writel(value, reg);
+ readl(reg);
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+ ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+ struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+ dma_addr_t map;
+ int err;
+ lrg_buf_cb->next = NULL;
+
+ if (qdev->lrg_buf_free_tail == NULL) { /* The list is empty */
+ qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+ } else {
+ qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+ qdev->lrg_buf_free_tail = lrg_buf_cb;
+ }
+
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ qdev->lrg_buf_skb_check++;
+ } else {
+ /*
+			 * We save some space to copy the ethhdr from the
+			 * first buffer
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = dma_map_single(&qdev->pdev->dev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+
+ qdev->lrg_buf_skb_check++;
+ return;
+ }
+
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ }
+ }
+
+ qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
+ *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+
+ if (lrg_buf_cb != NULL) {
+ qdev->lrg_buf_free_head = lrg_buf_cb->next;
+ if (qdev->lrg_buf_free_head == NULL)
+ qdev->lrg_buf_free_tail = NULL;
+ qdev->lrg_buf_free_count--;
+ }
+
+ return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+ unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+ ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+ int i;
+ u32 mask;
+ u32 dataBit;
+ u32 previousBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ /* Clock in a zero, then do the start bit */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_DO_1 | AUBURN_EEPROM_CLK_FALL));
+
+ mask = 1 << (FM93C56A_CMD_BITS - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+ dataBit = (cmd & mask)
+ ? AUBURN_EEPROM_DO_1
+ : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /* If the bit changed, change the DO state to match */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK |
+ qdev->eeprom_cmd_data | dataBit));
+ previousBit = dataBit;
+ }
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_FALL));
+ cmd = cmd << 1;
+ }
+
+ mask = 1 << (addrBits - 1);
+ /* Force the previous data bit to be different */
+ previousBit = 0xffff;
+ for (i = 0; i < addrBits; i++) {
+ dataBit = (eepromAddr & mask) ? AUBURN_EEPROM_DO_1
+ : AUBURN_EEPROM_DO_0;
+ if (previousBit != dataBit) {
+ /*
+ * If the bit changed, then change the DO state to
+ * match
+ */
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK |
+ qdev->eeprom_cmd_data | dataBit));
+ previousBit = dataBit;
+ }
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_RISE));
+ ql_write_nvram_reg(qdev, spir,
+ (ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ dataBit | AUBURN_EEPROM_CLK_FALL));
+ eepromAddr = eepromAddr << 1;
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_deselect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
+ ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
+{
+ int i;
+ u32 data = 0;
+ u32 dataBit;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ __iomem u32 *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+
+ /* Read the data bits */
+ /* The first bit is a dummy. Clock right over it. */
+ for (i = 0; i < dataBits; i++) {
+ ql_write_nvram_reg(qdev, spir,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_RISE);
+ ql_write_nvram_reg(qdev, spir,
+ ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+ AUBURN_EEPROM_CLK_FALL);
+ dataBit = (ql_read_common_reg(qdev, spir) &
+ AUBURN_EEPROM_DI_1) ? 1 : 0;
+ data = (data << 1) | dataBit;
+ }
+ *value = (u16)data;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void eeprom_readword(struct ql3_adapter *qdev,
+ u32 eepromAddr, unsigned short *value)
+{
+ fm93c56a_select(qdev);
+ fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
+ fm93c56a_datain(qdev, value);
+ fm93c56a_deselect(qdev);
+}
+
+static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
+{
+ __le16 buf[ETH_ALEN / 2];
+
+ buf[0] = cpu_to_le16(addr[0]);
+ buf[1] = cpu_to_le16(addr[1]);
+ buf[2] = cpu_to_le16(addr[2]);
+ eth_hw_addr_set(ndev, (u8 *)buf);
+}
+
+static int ql_get_nvram_params(struct ql3_adapter *qdev)
+{
+ u16 *pEEPROMData;
+ u16 checksum = 0;
+ u32 index;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ pEEPROMData = (u16 *)&qdev->nvram_data;
+ qdev->eeprom_cmd_data = 0;
+ if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 10)) {
+ pr_err("%s: Failed ql_sem_spinlock()\n", __func__);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ for (index = 0; index < EEPROM_SIZE; index++) {
+ eeprom_readword(qdev, index, pEEPROMData);
+ checksum += *pEEPROMData;
+ pEEPROMData++;
+ }
+ ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
+
+ if (checksum != 0) {
+ netdev_err(qdev->ndev, "checksum should be zero, is %x!!\n",
+ checksum);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return -1;
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return checksum;
+}
+
+static const u32 PHYAddr[2] = {
+ PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
+};
+
+static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 temp;
+ int count = 1000;
+
+ while (count) {
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
+ if (!(temp & MAC_MII_STATUS_BSY))
+ return 0;
+ udelay(10);
+ count--;
+ }
+ return -1;
+}
+
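+/* Enable MII auto-scan so the MAC continuously polls the PHY/PETBI scan
+ * register; when two ports are present the scan cycles through both.
+ */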
+static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 scanControl;
+
+ if (qdev->numPorts > 1) {
+ /* Auto scan will cycle through multiple ports */
+ scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
+ } else {
+ scanControl = MAC_MII_CONTROL_SC;
+ }
+
+	/*
+	 * Scan register 1 of the PHY/PETBI and set up to scan
+	 * both devices. The autoscan starts from the first
+	 * register and completes the last one before rolling
+	 * over to the first.
+	 */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (scanControl) |
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
+}
+
+static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
+{
+ u8 ret;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ /* See if scan mode is enabled before we turn it off */
+ if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
+ (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
+ /* Scan is enabled */
+ ret = 1;
+ } else {
+ /* Scan is disabled */
+ ret = 0;
+ }
+
+ /*
+ * When disabling scan mode you must first change the MII register
+ * address
+ */
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ PHYAddr[0] | MII_SCAN_REGISTER);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
+ MAC_MII_CONTROL_RC) << 16));
+
+ return ret;
+}
+
+static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
+ u16 regAddr, u16 value, u32 phyAddr)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ phyAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete 9/10/04 SJP */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
+ u16 *value, u32 phyAddr)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u8 scanWasEnabled;
+ u32 temp;
+
+ scanWasEnabled = ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ phyAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ if (scanWasEnabled)
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
+
+ /* Wait for write to complete. */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
+{
+ u32 temp;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ ql_mii_disable_scan_mode(qdev);
+
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
+ qdev->PHYAddr | regAddr);
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16));
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
+
+ /* Wait for the read to complete */
+ if (ql_wait_for_mii_ready(qdev)) {
+ netif_warn(qdev, link, qdev->ndev, TIMED_OUT_MSG);
+ return -1;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
+ *value = (u16) temp;
+
+ ql_mii_enable_scan_mode(qdev);
+
+ return 0;
+}
+
+static void ql_petbi_reset(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
+}
+
+static void ql_petbi_start_neg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
+
+ ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
+
+ ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
+
+}
+
+static void ql_petbi_reset_ex(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ /* Enable Auto-negotiation sense */
+ ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg |= PETBI_TBI_AUTO_SENSE;
+ ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
+ PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
+ PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
+ PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_petbi_init(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset(qdev);
+ ql_petbi_start_neg(qdev);
+}
+
+static void ql_petbi_init_ex(struct ql3_adapter *qdev)
+{
+ ql_petbi_reset_ex(qdev);
+ ql_petbi_start_neg_ex(qdev);
+}
+
+static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
+}
+
+static void phyAgereSpecificInit(struct ql3_adapter *qdev, u32 miiAddr)
+{
+ netdev_info(qdev->ndev, "enabling Agere specific PHY\n");
+ /* power down device bit 11 = 1 */
+ ql_mii_write_reg_ex(qdev, 0x00, 0x1940, miiAddr);
+ /* enable diagnostic mode bit 2 = 1 */
+ ql_mii_write_reg_ex(qdev, 0x12, 0x840e, miiAddr);
+ /* 1000MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8805, miiAddr);
+ /* 1000MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0xf03e, miiAddr);
+ /* 100MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8806, miiAddr);
+ /* 100MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0x003e, miiAddr);
+ /* 10MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x8807, miiAddr);
+ /* 10MB amplitude adjust (see Agere errata) */
+ ql_mii_write_reg_ex(qdev, 0x11, 0x1f00, miiAddr);
+ /* point to hidden reg 0x2806 */
+ ql_mii_write_reg_ex(qdev, 0x10, 0x2806, miiAddr);
+ /* Write new PHYAD w/bit 5 set */
+ ql_mii_write_reg_ex(qdev, 0x11,
+ 0x0020 | (PHYAddr[qdev->mac_index] >> 8), miiAddr);
+ /*
+ * Disable diagnostic mode bit 2 = 0
+ * Power up device bit 11 = 0
+ * Link up (on) and activity (blink)
+ */
+ ql_mii_write_reg(qdev, 0x12, 0x840a);
+ ql_mii_write_reg(qdev, 0x00, 0x1140);
+ ql_mii_write_reg(qdev, 0x1c, 0xfaf0);
+}
+
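+/* Decode the OUI and model number from the two PHY ID registers and look
+ * them up in the table of known PHYs.
+ */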
+static enum PHY_DEVICE_TYPE getPhyType(struct ql3_adapter *qdev,
+ u16 phyIdReg0, u16 phyIdReg1)
+{
+ enum PHY_DEVICE_TYPE result = PHY_TYPE_UNKNOWN;
+ u32 oui;
+ u16 model;
+ int i;
+
+ if (phyIdReg0 == 0xffff)
+ return result;
+
+ if (phyIdReg1 == 0xffff)
+ return result;
+
+ /* oui is split between two registers */
+ oui = (phyIdReg0 << 6) | ((phyIdReg1 & PHY_OUI_1_MASK) >> 10);
+
+ model = (phyIdReg1 & PHY_MODEL_MASK) >> 4;
+
+ /* Scan table for this PHY */
+ for (i = 0; i < MAX_PHY_DEV_TYPES; i++) {
+ if ((oui == PHY_DEVICES[i].phyIdOUI) &&
+ (model == PHY_DEVICES[i].phyIdModel)) {
+ netdev_info(qdev->ndev, "Phy: %s\n",
+ PHY_DEVICES[i].name);
+ result = PHY_DEVICES[i].phyDevice;
+ break;
+ }
+ }
+
+ return result;
+}
+
+static int ql_phy_get_speed(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ switch (qdev->phyType) {
+ case PHY_AGERE_ET1011C: {
+ if (ql_mii_read_reg(qdev, 0x1A, &reg) < 0)
+ return 0;
+
+ reg = (reg >> 8) & 3;
+ break;
+ }
+ default:
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+
+ reg = (((reg & 0x18) >> 3) & 3);
+ }
+
+ switch (reg) {
+ case 2:
+ return SPEED_1000;
+ case 1:
+ return SPEED_100;
+ case 0:
+ return SPEED_10;
+ default:
+ return -1;
+ }
+}
+
+static int ql_is_full_dup(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ switch (qdev->phyType) {
+ case PHY_AGERE_ET1011C: {
+ if (ql_mii_read_reg(qdev, 0x1A, &reg))
+ return 0;
+
+ return ((reg & 0x0080) && (reg & 0x1000)) != 0;
+ }
+ case PHY_VITESSE_VSC8211:
+ default: {
+ if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
+ return 0;
+ return (reg & PHY_AUX_DUPLEX_STAT) != 0;
+ }
+ }
+}
+
+static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
+{
+ u16 reg;
+
+ if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
+ return 0;
+
+ return (reg & PHY_NEG_PAUSE) != 0;
+}
+
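+/* Identify the attached PHY from its ID registers. If the default address
+ * reads back as all ones, retry at the Agere-specific MII address and, when
+ * an Agere ET1011C is found, apply its errata/initialization sequence.
+ */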
+static int PHY_Setup(struct ql3_adapter *qdev)
+{
+ u16 reg1;
+ u16 reg2;
+ bool agereAddrChangeNeeded = false;
+ u32 miiAddr = 0;
+ int err;
+
+ /* Determine the PHY we are using by reading the ID's */
+ err = ql_mii_read_reg(qdev, PHY_ID_0_REG, &reg1);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_0_REG\n");
+ return err;
+ }
+
+ err = ql_mii_read_reg(qdev, PHY_ID_1_REG, &reg2);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG\n");
+ return err;
+ }
+
+ /* Check if we have a Agere PHY */
+ if ((reg1 == 0xffff) || (reg2 == 0xffff)) {
+
+		/* Determine which MII address we should be using,
+		   based on the index of the card */
+ if (qdev->mac_index == 0)
+ miiAddr = MII_AGERE_ADDR_1;
+ else
+ miiAddr = MII_AGERE_ADDR_2;
+
+ err = ql_mii_read_reg_ex(qdev, PHY_ID_0_REG, &reg1, miiAddr);
+ if (err != 0) {
+ netdev_err(qdev->ndev,
+ "Could not read from reg PHY_ID_0_REG after Agere detected\n");
+ return err;
+ }
+
+ err = ql_mii_read_reg_ex(qdev, PHY_ID_1_REG, &reg2, miiAddr);
+ if (err != 0) {
+ netdev_err(qdev->ndev, "Could not read from reg PHY_ID_1_REG after Agere detected\n");
+ return err;
+ }
+
+ /* We need to remember to initialize the Agere PHY */
+ agereAddrChangeNeeded = true;
+ }
+
+ /* Determine the particular PHY we have on board to apply
+ PHY specific initializations */
+ qdev->phyType = getPhyType(qdev, reg1, reg2);
+
+ if ((qdev->phyType == PHY_AGERE_ET1011C) && agereAddrChangeNeeded) {
+ /* need this here so address gets changed */
+ phyAgereSpecificInit(qdev, miiAddr);
+ } else if (qdev->phyType == PHY_TYPE_UNKNOWN) {
+ netdev_err(qdev->ndev, "PHY is unknown\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
+ else
+ value = (MAC_CONFIG_REG_PE << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
+ else
+ value = (MAC_CONFIG_REG_SR << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
+ else
+ value = (MAC_CONFIG_REG_GM << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
+ else
+ value = (MAC_CONFIG_REG_FD << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ if (enable)
+ value =
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
+ ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
+ else
+ value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
+ else
+ ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_fiber(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_SM0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_SM1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static int ql_is_auto_cfg(struct ql3_adapter *qdev)
+{
+ u16 reg;
+ ql_mii_read_reg(qdev, 0x00, &reg);
+ return (reg & 0x1000) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AC0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AC1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ netif_info(qdev, link, qdev->ndev, "Auto-Negotiate complete\n");
+ return 1;
+ }
+ netif_info(qdev, link, qdev->ndev, "Auto-Negotiate incomplete\n");
+ return 0;
+}
+
+/*
+ * ql_is_neg_pause() returns 1 if pause was negotiated to be on
+ */
+static int ql_is_neg_pause(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return ql_is_petbi_neg_pause(qdev);
+ else
+ return ql_is_phy_neg_pause(qdev);
+}
+
+static int ql_auto_neg_error(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_AE0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_AE1;
+ break;
+ }
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+static u32 ql_get_link_speed(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return SPEED_1000;
+ else
+ return ql_phy_get_speed(qdev);
+}
+
+static int ql_is_link_full_dup(struct ql3_adapter *qdev)
+{
+ if (ql_is_fiber(qdev))
+ return 1;
+ else
+ return ql_is_full_dup(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = ISP_CONTROL_LINK_DN_0;
+ break;
+ case 1:
+ bitToCheck = ISP_CONTROL_LINK_DN_1;
+ break;
+ }
+
+ temp =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ return (temp & bitToCheck) != 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ switch (qdev->mac_index) {
+ case 0:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_0) |
+ (ISP_CONTROL_LINK_DN_0 << 16));
+ break;
+
+ case 1:
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ (ISP_CONTROL_LINK_DN_1) |
+ (ISP_CONTROL_LINK_DN_1 << 16));
+ break;
+
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_this_adapter_controls_port(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_F1_ENABLED;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_F3_ENABLED;
+ break;
+ default:
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck) {
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "not link master\n");
+ return 0;
+ }
+
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev, "link master\n");
+ return 1;
+}
+
+static void ql_phy_reset_ex(struct ql3_adapter *qdev)
+{
+ ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET,
+ PHYAddr[qdev->mac_index]);
+}
+
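+/* Program the 1000 Mb, 10/100 Mb and pause advertisement registers from the
+ * NVRAM port configuration, then restart auto-negotiation.
+ */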
+static void ql_phy_start_neg_ex(struct ql3_adapter *qdev)
+{
+ u16 reg;
+ u16 portConfiguration;
+
+ if (qdev->phyType == PHY_AGERE_ET1011C)
+ ql_mii_write_reg(qdev, 0x13, 0x0000);
+ /* turn off external loopback */
+
+ if (qdev->mac_index == 0)
+ portConfiguration =
+ qdev->nvram_data.macCfg_port0.portConfiguration;
+ else
+ portConfiguration =
+ qdev->nvram_data.macCfg_port1.portConfiguration;
+
+	/* Some HBAs in the field are set to 0 and need to be
+	   reinterpreted with a default value */
+ if (portConfiguration == 0)
+ portConfiguration = PORT_CONFIG_DEFAULT;
+
+ /* Set the 1000 advertisements */
+ ql_mii_read_reg_ex(qdev, PHY_GIG_CONTROL, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg &= ~PHY_GIG_ALL_PARAMS;
+
+ if (portConfiguration & PORT_CONFIG_1000MB_SPEED) {
+ if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED)
+ reg |= PHY_GIG_ADV_1000F;
+ else
+ reg |= PHY_GIG_ADV_1000H;
+ }
+
+ ql_mii_write_reg_ex(qdev, PHY_GIG_CONTROL, reg,
+ PHYAddr[qdev->mac_index]);
+
+ /* Set the 10/100 & pause negotiation advertisements */
+ ql_mii_read_reg_ex(qdev, PHY_NEG_ADVER, &reg,
+ PHYAddr[qdev->mac_index]);
+ reg &= ~PHY_NEG_ALL_PARAMS;
+
+ if (portConfiguration & PORT_CONFIG_SYM_PAUSE_ENABLED)
+ reg |= PHY_NEG_ASY_PAUSE | PHY_NEG_SYM_PAUSE;
+
+ if (portConfiguration & PORT_CONFIG_FULL_DUPLEX_ENABLED) {
+ if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+ reg |= PHY_NEG_ADV_100F;
+
+ if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+ reg |= PHY_NEG_ADV_10F;
+ }
+
+ if (portConfiguration & PORT_CONFIG_HALF_DUPLEX_ENABLED) {
+ if (portConfiguration & PORT_CONFIG_100MB_SPEED)
+ reg |= PHY_NEG_ADV_100H;
+
+ if (portConfiguration & PORT_CONFIG_10MB_SPEED)
+ reg |= PHY_NEG_ADV_10H;
+ }
+
+ if (portConfiguration & PORT_CONFIG_1000MB_SPEED)
+ reg |= 1;
+
+ ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER, reg,
+ PHYAddr[qdev->mac_index]);
+
+ ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, PHYAddr[qdev->mac_index]);
+
+ ql_mii_write_reg_ex(qdev, CONTROL_REG,
+ reg | PHY_CTRL_RESTART_NEG | PHY_CTRL_AUTO_NEG,
+ PHYAddr[qdev->mac_index]);
+}
+
+static void ql_phy_init_ex(struct ql3_adapter *qdev)
+{
+ ql_phy_reset_ex(qdev);
+ PHY_Setup(qdev);
+ ql_phy_start_neg_ex(qdev);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static u32 ql_get_link_state(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 bitToCheck = 0;
+ u32 temp, linkState;
+
+ switch (qdev->mac_index) {
+ case 0:
+ bitToCheck = PORT_STATUS_UP0;
+ break;
+ case 1:
+ bitToCheck = PORT_STATUS_UP1;
+ break;
+ }
+
+ temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (temp & bitToCheck)
+ linkState = LS_UP;
+ else
+ linkState = LS_DOWN;
+
+ return linkState;
+}
+
+static int ql_port_start(struct ql3_adapter *qdev)
+{
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ netdev_err(qdev->ndev, "Could not get hw lock for GIO\n");
+ return -1;
+ }
+
+ if (ql_is_fiber(qdev)) {
+ ql_petbi_init(qdev);
+ } else {
+ /* Copper port */
+ ql_phy_init_ex(qdev);
+ }
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
+{
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (!ql_auto_neg_error(qdev)) {
+ if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+ /* configure the MAC */
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Configuring link\n");
+ ql_mac_cfg_soft_reset(qdev, 1);
+ ql_mac_cfg_gig(qdev,
+ (ql_get_link_speed
+ (qdev) ==
+ SPEED_1000));
+ ql_mac_cfg_full_dup(qdev,
+ ql_is_link_full_dup
+ (qdev));
+ ql_mac_cfg_pause(qdev,
+ ql_is_neg_pause
+ (qdev));
+ ql_mac_cfg_soft_reset(qdev, 0);
+
+ /* enable the MAC */
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Enabling mac\n");
+ ql_mac_enable(qdev, 1);
+ }
+
+ qdev->port_link_state = LS_UP;
+ netif_start_queue(qdev->ndev);
+ netif_carrier_on(qdev->ndev);
+ netif_info(qdev, link, qdev->ndev,
+ "Link is up at %d Mbps, %s duplex\n",
+ ql_get_link_speed(qdev),
+ ql_is_link_full_dup(qdev) ? "full" : "half");
+
+ } else { /* Remote error detected */
+
+ if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
+ netif_printk(qdev, link, KERN_DEBUG, qdev->ndev,
+ "Remote error detected. Calling ql_port_start()\n");
+ /*
+ * ql_port_start() is shared code and needs
+			 * to lock the PHY on its own.
+ */
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ if (ql_port_start(qdev)) /* Restart port */
+ return -1;
+ return 0;
+ }
+ }
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
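+/* Link state machine, run from delayed work: sample the current port status,
+ * finish auto-negotiation handling on link-up transitions, track link-down
+ * events and re-arm the adapter timer.
+ */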
+static void ql_link_state_machine_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, link_state_work.work);
+
+ u32 curr_link_state;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ curr_link_state = ql_get_link_state(qdev);
+
+ if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
+ netif_info(qdev, link, qdev->ndev,
+ "Reset in progress, skip processing link state\n");
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+		/* Restart timer on 1 second interval. */
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ return;
+ }
+
+ switch (qdev->port_link_state) {
+ default:
+ if (test_bit(QL_LINK_MASTER, &qdev->flags))
+ ql_port_start(qdev);
+ qdev->port_link_state = LS_DOWN;
+ fallthrough;
+
+ case LS_DOWN:
+ if (curr_link_state == LS_UP) {
+ netif_info(qdev, link, qdev->ndev, "Link is up\n");
+ if (ql_is_auto_neg_complete(qdev))
+ ql_finish_auto_neg(qdev);
+
+ if (qdev->port_link_state == LS_UP)
+ ql_link_down_detect_clear(qdev);
+
+ qdev->port_link_state = LS_UP;
+ }
+ break;
+
+ case LS_UP:
+ /*
+ * See if the link is currently down or went down and came
+ * back up
+ */
+ if (curr_link_state == LS_DOWN) {
+ netif_info(qdev, link, qdev->ndev, "Link is down\n");
+ qdev->port_link_state = LS_DOWN;
+ }
+ if (ql_link_down_detect(qdev))
+ qdev->port_link_state = LS_DOWN;
+ break;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+	/* Restart timer on 1 second interval. */
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_get_phy_owner(struct ql3_adapter *qdev)
+{
+ if (ql_this_adapter_controls_port(qdev))
+ set_bit(QL_LINK_MASTER, &qdev->flags);
+ else
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+}
+
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
+static void ql_init_scan_mode(struct ql3_adapter *qdev)
+{
+ ql_mii_enable_scan_mode(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+ if (ql_this_adapter_controls_port(qdev))
+ ql_petbi_init_ex(qdev);
+ } else {
+ if (ql_this_adapter_controls_port(qdev))
+ ql_phy_init_ex(qdev);
+ }
+}
+
+/*
+ * MII_Setup needs to be called before taking the PHY out of reset
+ * so that the management interface clock speed can be set properly.
+ * It would be better if we had a way to disable MDC until after the
+ * PHY is out of reset, but we don't have that capability.
+ */
+static int ql_mii_setup(struct ql3_adapter *qdev)
+{
+ u32 reg;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7))
+ return -1;
+
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ ql_write_page0_reg(qdev,
+ &port_regs->macMIIMgmtControlReg, 0x0f00000);
+
+ /* Divide 125MHz clock by 28 to meet PHY timing requirements */
+ reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
+
+ ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
+ reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ return 0;
+}
+
+#define SUPPORTED_OPTICAL_MODES (SUPPORTED_1000baseT_Full | \
+ SUPPORTED_FIBRE | \
+ SUPPORTED_Autoneg)
+#define SUPPORTED_TP_MODES (SUPPORTED_10baseT_Half | \
+ SUPPORTED_10baseT_Full | \
+ SUPPORTED_100baseT_Half | \
+ SUPPORTED_100baseT_Full | \
+ SUPPORTED_1000baseT_Half | \
+ SUPPORTED_1000baseT_Full | \
+ SUPPORTED_Autoneg | \
+				SUPPORTED_TP)
+
+static u32 ql_supported_modes(struct ql3_adapter *qdev)
+{
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags))
+ return SUPPORTED_OPTICAL_MODES;
+
+ return SUPPORTED_TP_MODES;
+}
+
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_auto_cfg(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+ u32 status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_get_link_speed(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+ int status;
+ unsigned long hw_flags;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE |
+ (qdev->mac_index) * 2) << 7)) {
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return 0;
+ }
+ status = ql_is_link_full_dup(qdev);
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return status;
+}
+
+static int ql_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ u32 supported, advertising;
+
+ supported = ql_supported_modes(qdev);
+
+ if (test_bit(QL_LINK_OPTICAL, &qdev->flags)) {
+ cmd->base.port = PORT_FIBRE;
+ } else {
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = qdev->PHYAddr;
+ }
+ advertising = ql_supported_modes(qdev);
+ cmd->base.autoneg = ql_get_auto_cfg_status(qdev);
+ cmd->base.speed = ql_get_speed(qdev);
+ cmd->base.duplex = ql_get_full_dup(qdev);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static void ql_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ strscpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, ql3xxx_driver_version,
+ sizeof(drvinfo->version));
+ strscpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static u32 ql_get_msglevel(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return qdev->msg_enable;
+}
+
+static void ql_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ qdev->msg_enable = value;
+}
+
+static void ql_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ u32 reg;
+ if (qdev->mac_index == 0)
+ reg = ql_read_page0_reg(qdev, &port_regs->mac0ConfigReg);
+ else
+ reg = ql_read_page0_reg(qdev, &port_regs->mac1ConfigReg);
+
+ pause->autoneg = ql_get_auto_cfg_status(qdev);
+ pause->rx_pause = (reg & MAC_CONFIG_REG_RF) >> 2;
+ pause->tx_pause = (reg & MAC_CONFIG_REG_TF) >> 1;
+}
+
+static const struct ethtool_ops ql3xxx_ethtool_ops = {
+ .get_drvinfo = ql_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ql_get_msglevel,
+ .set_msglevel = ql_set_msglevel,
+ .get_pauseparam = ql_get_pauseparam,
+ .get_link_ksettings = ql_get_link_ksettings,
+};
+
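+/* Walk the large-buffer free list and retry skb allocation/DMA mapping for
+ * entries that previously failed; returns 1 once every entry has a buffer
+ * attached, 0 otherwise.
+ */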
+static int ql_populate_free_queue(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
+ dma_addr_t map;
+ int err;
+
+ while (lrg_buf_cb) {
+ if (!lrg_buf_cb->skb) {
+ lrg_buf_cb->skb =
+ netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!lrg_buf_cb->skb)) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Failed netdev_alloc_skb()\n");
+ break;
+ } else {
+ /*
+ * We save some space to copy the ethhdr from
+ * first buffer
+ */
+ skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+ map = dma_map_single(&qdev->pdev->dev,
+ lrg_buf_cb->skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb(lrg_buf_cb->skb);
+ lrg_buf_cb->skb = NULL;
+ break;
+ }
+
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ --qdev->lrg_buf_skb_check;
+ if (!qdev->lrg_buf_skb_check)
+ return 1;
+ }
+ }
+ lrg_buf_cb = lrg_buf_cb->next;
+ }
+ return 0;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_small_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if (qdev->small_buf_release_cnt >= 16) {
+ while (qdev->small_buf_release_cnt >= 16) {
+ qdev->small_buf_q_producer_index++;
+
+ if (qdev->small_buf_q_producer_index ==
+ NUM_SBUFQ_ENTRIES)
+ qdev->small_buf_q_producer_index = 0;
+ qdev->small_buf_release_cnt -= 8;
+ }
+ wmb();
+ writel_relaxed(qdev->small_buf_q_producer_index,
+ &port_regs->CommonRegs.rxSmallQProducerIndex);
+ }
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
+{
+ struct bufq_addr_element *lrg_buf_q_ele;
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+
+ if ((qdev->lrg_buf_free_count >= 8) &&
+ (qdev->lrg_buf_release_cnt >= 16)) {
+
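+		/*
+		 * Some large buffers may still be waiting for an skb; try to
+		 * repopulate them and hold off advancing the producer index
+		 * until every one has been refilled.
+		 */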
+ if (qdev->lrg_buf_skb_check)
+ if (!ql_populate_free_queue(qdev))
+ return;
+
+ lrg_buf_q_ele = qdev->lrg_buf_next_free;
+
+ while ((qdev->lrg_buf_release_cnt >= 16) &&
+ (qdev->lrg_buf_free_count >= 8)) {
+
+ for (i = 0; i < 8; i++) {
+ lrg_buf_cb =
+ ql_get_from_lrg_buf_free_list(qdev);
+ lrg_buf_q_ele->addr_high =
+ lrg_buf_cb->buf_phy_addr_high;
+ lrg_buf_q_ele->addr_low =
+ lrg_buf_cb->buf_phy_addr_low;
+ lrg_buf_q_ele++;
+
+ qdev->lrg_buf_release_cnt--;
+ }
+
+ qdev->lrg_buf_q_producer_index++;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ qdev->num_lbufq_entries)
+ qdev->lrg_buf_q_producer_index = 0;
+
+ if (qdev->lrg_buf_q_producer_index ==
+ (qdev->num_lbufq_entries - 1)) {
+ lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
+ }
+ }
+ wmb();
+ qdev->lrg_buf_next_free = lrg_buf_q_ele;
+ writel(qdev->lrg_buf_q_producer_index,
+ &port_regs->CommonRegs.rxLargeQProducerIndex);
+ }
+}
+
+static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_rsp *mac_rsp)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+
+ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ netdev_warn(qdev->ndev,
+ "Frame too short but it was padded and sent\n");
+ }
+
+ tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
+
+ /* Check the transmit response flags for any errors */
+ if (mac_rsp->flags & OB_MAC_IOCB_RSP_S) {
+ netdev_err(qdev->ndev,
+ "Frame too short to be legal, frame not sent\n");
+
+ qdev->ndev->stats.tx_errors++;
+ goto frame_not_sent;
+ }
+
+ if (tx_cb->seg_count == 0) {
+ netdev_err(qdev->ndev, "tx_cb->seg_count == 0: %d\n",
+ mac_rsp->transaction_id);
+
+ qdev->ndev->stats.tx_errors++;
+ goto invalid_seg_count;
+ }
+
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen), DMA_TO_DEVICE);
+ tx_cb->seg_count--;
+ if (tx_cb->seg_count) {
+ for (i = 1; i < tx_cb->seg_count; i++) {
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[i], mapaddr),
+ dma_unmap_len(&tx_cb->map[i], maplen),
+ DMA_TO_DEVICE);
+ }
+ }
+ qdev->ndev->stats.tx_packets++;
+ qdev->ndev->stats.tx_bytes += tx_cb->skb->len;
+
+frame_not_sent:
+ dev_kfree_skb_irq(tx_cb->skb);
+ tx_cb->skb = NULL;
+
+invalid_seg_count:
+ atomic_inc(&qdev->tx_count);
+}
+
+static void ql_get_sbuf(struct ql3_adapter *qdev)
+{
+ if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
+ qdev->small_buf_index = 0;
+ qdev->small_buf_release_cnt++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_lbuf(struct ql3_adapter *qdev)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb = NULL;
+ lrg_buf_cb = &qdev->lrg_buf[qdev->lrg_buf_index];
+ qdev->lrg_buf_release_cnt++;
+ if (++qdev->lrg_buf_index == qdev->num_large_buffers)
+ qdev->lrg_buf_index = 0;
+ return lrg_buf_cb;
+}
+
+/*
+ * The difference between 3022 and 3032 for inbound completions:
+ * 3022 uses two buffers per completion. The first buffer contains
+ * (some) header info, the second the remainder of the headers plus
+ * the data. For this chip we reserve some space at the top of the
+ * receive buffer so that the header info in buffer one can be
+ * prepended to buffer two. Buffer two is then sent up while
+ * buffer one is returned to the hardware to be reused.
+ * 3032 receives all of its data and headers in one buffer for a
+ * simpler process. 3032 also supports checksum verification as
+ * can be seen in ql_process_macip_rx_intr().
+ */
+static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ struct sk_buff *skb;
+ u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+ ql_get_sbuf(qdev);
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
+
+ /* start of second buffer */
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
+ skb = lrg_buf_cb2->skb;
+
+ qdev->ndev->stats.rx_packets++;
+ qdev->ndev->stats.rx_bytes += length;
+
+ skb_put(skb, length);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
+ prefetch(skb->data);
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, qdev->ndev);
+
+ napi_gro_receive(&qdev->napi, skb);
+ lrg_buf_cb2->skb = NULL;
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
+ struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
+{
+ struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
+ struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
+ struct sk_buff *skb1 = NULL, *skb2;
+ struct net_device *ndev = qdev->ndev;
+ u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
+ u16 size = 0;
+
+ /*
+ * Get the inbound address list (small buffer).
+ */
+
+ ql_get_sbuf(qdev);
+
+ if (qdev->device_id == QL3022_DEVICE_ID) {
+ /* start of first buffer on 3022 */
+ lrg_buf_cb1 = ql_get_lbuf(qdev);
+ skb1 = lrg_buf_cb1->skb;
+ size = ETH_HLEN;
+ if (*((u16 *) skb1->data) != 0xFFFF)
+ size += VLAN_ETH_HLEN - ETH_HLEN;
+ }
+
+ /* start of second buffer */
+ lrg_buf_cb2 = ql_get_lbuf(qdev);
+ skb2 = lrg_buf_cb2->skb;
+
+ skb_put(skb2, length); /* Just the second buffer length here. */
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(lrg_buf_cb2, mapaddr),
+ dma_unmap_len(lrg_buf_cb2, maplen), DMA_FROM_DEVICE);
+ prefetch(skb2->data);
+
+ skb_checksum_none_assert(skb2);
+ if (qdev->device_id == QL3022_DEVICE_ID) {
+ /*
+ * Copy the ethhdr from first buffer to second. This
+ * is necessary for 3022 IP completions.
+ */
+ skb_copy_from_linear_data_offset(skb1, VLAN_ID_LEN,
+ skb_push(skb2, size), size);
+ } else {
+ u16 checksum = le16_to_cpu(ib_ip_rsp_ptr->checksum);
+ if (checksum &
+ (IB_IP_IOCB_RSP_3032_ICE |
+ IB_IP_IOCB_RSP_3032_CE)) {
+ netdev_err(ndev,
+ "%s: Bad checksum for this %s packet, checksum = %x\n",
+ __func__,
+ ((checksum & IB_IP_IOCB_RSP_3032_TCP) ?
+ "TCP" : "UDP"), checksum);
+ } else if ((checksum & IB_IP_IOCB_RSP_3032_TCP) ||
+ (checksum & IB_IP_IOCB_RSP_3032_UDP &&
+ !(checksum & IB_IP_IOCB_RSP_3032_NUC))) {
+ skb2->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ }
+ skb2->protocol = eth_type_trans(skb2, qdev->ndev);
+
+ napi_gro_receive(&qdev->napi, skb2);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += length;
+ lrg_buf_cb2->skb = NULL;
+
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
+ ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
+}
+
+static int ql_tx_rx_clean(struct ql3_adapter *qdev, int budget)
+{
+ struct net_rsp_iocb *net_rsp;
+ struct net_device *ndev = qdev->ndev;
+ int work_done = 0;
+
+ /* While there are entries in the completion queue. */
+ while ((le32_to_cpu(*(qdev->prsp_producer_index)) !=
+ qdev->rsp_consumer_index) && (work_done < budget)) {
+
+ net_rsp = qdev->rsp_current;
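+		/*
+		 * Make sure the IOCB fields are read only after the updated
+		 * producer index has been observed.
+		 */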
+ rmb();
+ /*
+ * Fix 4032 chip's undocumented "feature" where bit-8 is set
+ * if the inbound completion is for a VLAN.
+ */
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ net_rsp->opcode &= 0x7f;
+ switch (net_rsp->opcode) {
+
+ case OPCODE_OB_MAC_IOCB_FN0:
+ case OPCODE_OB_MAC_IOCB_FN2:
+ ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
+ net_rsp);
+ break;
+
+ case OPCODE_IB_MAC_IOCB:
+ case OPCODE_IB_3032_MAC_IOCB:
+ ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
+ net_rsp);
+ work_done++;
+ break;
+
+ case OPCODE_IB_IP_IOCB:
+ case OPCODE_IB_3032_IP_IOCB:
+ ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
+ net_rsp);
+ work_done++;
+ break;
+ default: {
+ u32 *tmp = (u32 *)net_rsp;
+ netdev_err(ndev,
+ "Hit default case, not handled!\n"
+ " dropping the packet, opcode = %x\n"
+ "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
+ net_rsp->opcode,
+ (unsigned long int)tmp[0],
+ (unsigned long int)tmp[1],
+ (unsigned long int)tmp[2],
+ (unsigned long int)tmp[3]);
+ }
+ }
+
+ qdev->rsp_consumer_index++;
+
+ if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+ } else {
+ qdev->rsp_current++;
+ }
+
+ }
+
+ return work_done;
+}
+
+static int ql_poll(struct napi_struct *napi, int budget)
+{
+ struct ql3_adapter *qdev = container_of(napi, struct ql3_adapter, napi);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ int work_done;
+
+ work_done = ql_tx_rx_clean(qdev, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
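+		/*
+		 * Replenish the receive buffer queues and publish the new
+		 * response-queue consumer index before interrupts are
+		 * re-enabled.
+		 */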
+ spin_lock_irqsave(&qdev->hw_lock, flags);
+ ql_update_small_bufq_prod_index(qdev);
+ ql_update_lrg_bufq_prod_index(qdev);
+ writel(qdev->rsp_consumer_index,
+ &port_regs->CommonRegs.rspQConsumerIndex);
+ spin_unlock_irqrestore(&qdev->hw_lock, flags);
+
+ ql_enable_interrupts(qdev);
+ }
+ return work_done;
+}
+
+static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+ int handled = 1;
+ u32 var;
+
+ value = ql_read_common_reg_l(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+
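+	/*
+	 * A fatal chip error (FE) or a reset issued by the other function
+	 * (RI): take the interface down and let the reset worker recover.
+	 */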
+ if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
+ spin_lock(&qdev->adapter_lock);
+ netif_stop_queue(qdev->ndev);
+ netif_carrier_off(qdev->ndev);
+ ql_disable_interrupts(qdev);
+ qdev->port_link_state = LS_DOWN;
+		set_bit(QL_RESET_ACTIVE, &qdev->flags);
+
+ if (value & ISP_CONTROL_FE) {
+ /*
+ * Chip Fatal Error.
+ */
+ var =
+ ql_read_page0_reg_l(qdev,
+ &port_regs->PortFatalErrStatus);
+ netdev_warn(ndev,
+ "Resetting chip. PortFatalErrStatus register = 0x%x\n",
+ var);
+			set_bit(QL_RESET_START, &qdev->flags);
+ } else {
+ /*
+ * Soft Reset Requested.
+ */
+			set_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ netdev_err(ndev,
+ "Another function issued a reset to the chip. ISR value = %x\n",
+ value);
+ }
+ queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
+ spin_unlock(&qdev->adapter_lock);
+ } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
+ ql_disable_interrupts(qdev);
+ if (likely(napi_schedule_prep(&qdev->napi)))
+ __napi_schedule(&qdev->napi);
+ } else
+ return IRQ_NONE;
+
+ return IRQ_RETVAL(handled);
+}
+
+/*
+ * Get the total number of segments needed for the given number of fragments.
+ * This is necessary because outbound address lists (OAL) will be used when
+ * more than two frags are given. Each address list has 5 addr/len pairs.
+ * The 5th pair in each OAL is used to point to the next OAL if more frags
+ * are coming. That is why the frags:segment count ratio is not linear.
+ */
+static int ql_get_seg_count(struct ql3_adapter *qdev, unsigned short frags)
+{
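+	/* The 3022 supports only single-segment sends. */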
+ if (qdev->device_id == QL3022_DEVICE_ID)
+ return 1;
+
+ if (frags <= 2)
+ return frags + 1;
+ else if (frags <= 6)
+ return frags + 2;
+ else if (frags <= 10)
+ return frags + 3;
+ else if (frags <= 14)
+ return frags + 4;
+ else if (frags <= 18)
+ return frags + 5;
+ return -1;
+}
+
+static void ql_hw_csum_setup(const struct sk_buff *skb,
+ struct ob_mac_iocb_req *mac_iocb_ptr)
+{
+ const struct iphdr *ip = ip_hdr(skb);
+
+ mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
+ mac_iocb_ptr->ip_hdr_len = ip->ihl;
+
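+	/*
+	 * Request hardware checksum generation: TC for TCP payloads,
+	 * UC for UDP, and IC in both cases.
+	 */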
+ if (ip->protocol == IPPROTO_TCP) {
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
+ OB_3032MAC_IOCB_REQ_IC;
+ } else {
+ mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
+ OB_3032MAC_IOCB_REQ_IC;
+ }
+
+}
+
+/*
+ * Map the buffers for this transmit.
+ * This will return NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
+ */
+static int ql_send_map(struct ql3_adapter *qdev,
+ struct ob_mac_iocb_req *mac_iocb_ptr,
+ struct ql_tx_buf_cb *tx_cb,
+ struct sk_buff *skb)
+{
+ struct oal *oal;
+ struct oal_entry *oal_entry;
+ int len = skb_headlen(skb);
+ dma_addr_t map;
+ int err;
+ int completed_segs, i;
+ int seg_cnt, seg = 0;
+ int frag_cnt = (int)skb_shinfo(skb)->nr_frags;
+
+ seg_cnt = tx_cb->seg_count;
+ /*
+ * Map the skb buffer first.
+ */
+ map = dma_map_single(&qdev->pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev, "PCI mapping failed with error: %d\n",
+ err);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(len);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
+ seg++;
+
+ if (seg_cnt == 1) {
+ /* Terminate the last segment. */
+ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+ return NETDEV_TX_OK;
+ }
+ oal = tx_cb->oal;
+ for (completed_segs = 0;
+ completed_segs < frag_cnt;
+ completed_segs++, seg++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[completed_segs];
+ oal_entry++;
+ /*
+ * Check for continuation requirements.
+ * It's strange but necessary.
+ * Continuation entry points to outbound address list.
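+ * The IOCB carries 3 ALPs and each OAL carries 5, with the
+ * last entry of each reused as the continuation pointer, so
+ * the breaks fall at segs 2, 7, 12 and 17.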
+ */
+ if ((seg == 2 && seg_cnt > 3) ||
+ (seg == 7 && seg_cnt > 8) ||
+ (seg == 12 && seg_cnt > 13) ||
+ (seg == 17 && seg_cnt > 18)) {
+ map = dma_map_single(&qdev->pdev->dev, oal,
+ sizeof(struct oal),
+ DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(sizeof(struct oal) |
+ OAL_CONT_ENTRY);
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen,
+ sizeof(struct oal));
+ oal_entry = (struct oal_entry *)oal;
+ oal++;
+ seg++;
+ }
+
+ map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping frags failed with error: %d\n",
+ err);
+ goto map_error;
+ }
+
+ oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
+ oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
+ oal_entry->len = cpu_to_le32(skb_frag_size(frag));
+ dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+ dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag));
+ }
+ /* Terminate the last segment. */
+ oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+ return NETDEV_TX_OK;
+
+map_error:
+	/* A PCI mapping failed, so back out: walk the OALs and the
+	 * pages that have already been mapped and unmap them to clean
+	 * up properly.
+	 */
+
+ seg = 1;
+ oal_entry = (struct oal_entry *)&mac_iocb_ptr->buf_addr0_low;
+ oal = tx_cb->oal;
+ for (i = 0; i < completed_segs; i++, seg++) {
+ oal_entry++;
+
+ /*
+ * Check for continuation requirements.
+ * It's strange but necessary.
+ */
+
+ if ((seg == 2 && seg_cnt > 3) ||
+ (seg == 7 && seg_cnt > 8) ||
+ (seg == 12 && seg_cnt > 13) ||
+ (seg == 17 && seg_cnt > 18)) {
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
+ oal++;
+ seg++;
+ }
+
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[seg], mapaddr),
+ dma_unmap_len(&tx_cb->map[seg], maplen),
+ DMA_TO_DEVICE);
+ }
+
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
+
+ return NETDEV_TX_BUSY;
+
+}
+
+/*
+ * The difference between 3022 and 3032 sends:
+ * 3022 only supports a simple single segment transmission.
+ * 3032 supports checksumming and scatter/gather lists (fragments).
+ * The 3032 supports sglists by using the 3 addr/len pairs (ALP)
+ * in the IOCB plus a chain of outbound address lists (OAL) that
+ * each contain 5 ALPs. The last ALP of the IOCB (3rd) or OAL (5th)
+ * will be used to point to an OAL when more ALP entries are required.
+ * The IOCB is always the top of the chain followed by one or more
+ * OALs (when necessary).
+ */
+static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct ql_tx_buf_cb *tx_cb;
+ u32 tot_len = skb->len;
+ struct ob_mac_iocb_req *mac_iocb_ptr;
+
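+	/*
+	 * tx_count tracks free request-queue slots; proceed only when at
+	 * least two remain so one slot is kept in reserve.
+	 */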
+ if (unlikely(atomic_read(&qdev->tx_count) < 2))
+ return NETDEV_TX_BUSY;
+
+ tx_cb = &qdev->tx_buf[qdev->req_producer_index];
+ tx_cb->seg_count = ql_get_seg_count(qdev,
+ skb_shinfo(skb)->nr_frags);
+ if (tx_cb->seg_count == -1) {
+ netdev_err(ndev, "%s: invalid segment count!\n", __func__);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ mac_iocb_ptr = tx_cb->queue_entry;
+ memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
+ mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
+ mac_iocb_ptr->flags = OB_MAC_IOCB_REQ_X;
+ mac_iocb_ptr->flags |= qdev->mb_bit_mask;
+ mac_iocb_ptr->transaction_id = qdev->req_producer_index;
+ mac_iocb_ptr->data_len = cpu_to_le16((u16) tot_len);
+ tx_cb->skb = skb;
+ if (qdev->device_id == QL3032_DEVICE_ID &&
+ skb->ip_summed == CHECKSUM_PARTIAL)
+ ql_hw_csum_setup(skb, mac_iocb_ptr);
+
+ if (ql_send_map(qdev, mac_iocb_ptr, tx_cb, skb) != NETDEV_TX_OK) {
+ netdev_err(ndev, "%s: Could not map the segments!\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ wmb();
+ qdev->req_producer_index++;
+ if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
+ qdev->req_producer_index = 0;
+ wmb();
+ ql_write_common_reg_l(qdev,
+ &port_regs->CommonRegs.reqQProducerIndex,
+ qdev->req_producer_index);
+
+ netif_printk(qdev, tx_queued, KERN_DEBUG, ndev,
+ "tx queued, slot %d, len %d\n",
+ qdev->req_producer_index, skb->len);
+
+ atomic_dec(&qdev->tx_count);
+ return NETDEV_TX_OK;
+}
+
+static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ qdev->req_q_size =
+ (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
+
+ qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
+
+	/* The barrier is required to ensure that the request and response
+	 * queue address writes reach the registers.
+	 */
+ wmb();
+
+ qdev->req_q_virt_addr =
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ &qdev->req_q_phy_addr, GFP_KERNEL);
+
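+	/* The queue must be size-aligned; bail out if the allocation is not. */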
+	if ((qdev->req_q_virt_addr == NULL) ||
+	    (LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1))) {
+ netdev_err(qdev->ndev, "reqQ failed\n");
+ return -ENOMEM;
+ }
+
+ qdev->rsp_q_virt_addr =
+ dma_alloc_coherent(&qdev->pdev->dev, (size_t)qdev->rsp_q_size,
+ &qdev->rsp_q_phy_addr, GFP_KERNEL);
+
+	if ((qdev->rsp_q_virt_addr == NULL) ||
+	    (LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1))) {
+ netdev_err(qdev->ndev, "rspQ allocation failed\n");
+ dma_free_coherent(&qdev->pdev->dev, (size_t)qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+ return -ENOMEM;
+ }
+
+ set_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+
+ return 0;
+}
+
+static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+
+ dma_free_coherent(&qdev->pdev->dev, qdev->req_q_size,
+ qdev->req_q_virt_addr, qdev->req_q_phy_addr);
+
+ qdev->req_q_virt_addr = NULL;
+
+ dma_free_coherent(&qdev->pdev->dev, qdev->rsp_q_size,
+ qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
+
+ qdev->rsp_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_REQ_RSP_Q_DONE, &qdev->flags);
+}
+
+static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
+{
+ /* Create Large Buffer Queue */
+ qdev->lrg_buf_q_size =
+ qdev->num_lbufq_entries * sizeof(struct lrg_buf_q_entry);
+ if (qdev->lrg_buf_q_size < PAGE_SIZE)
+ qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
+
+ qdev->lrg_buf = kmalloc_array(qdev->num_large_buffers,
+ sizeof(struct ql_rcv_buf_cb),
+ GFP_KERNEL);
+ if (qdev->lrg_buf == NULL)
+ return -ENOMEM;
+
+ qdev->lrg_buf_q_alloc_virt_addr =
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ &qdev->lrg_buf_q_alloc_phy_addr, GFP_KERNEL);
+
+ if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "lBufQ failed\n");
+ kfree(qdev->lrg_buf);
+ return -ENOMEM;
+ }
+ qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
+ qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
+
+ /* Create Small Buffer Queue */
+ qdev->small_buf_q_size =
+ NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
+ if (qdev->small_buf_q_size < PAGE_SIZE)
+ qdev->small_buf_q_alloc_size = PAGE_SIZE;
+ else
+ qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
+
+ qdev->small_buf_q_alloc_virt_addr =
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_q_alloc_size,
+ &qdev->small_buf_q_alloc_phy_addr, GFP_KERNEL);
+
+ if (qdev->small_buf_q_alloc_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Small Buffer Queue allocation failed\n");
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+ kfree(qdev->lrg_buf);
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
+ qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
+ set_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_buffer_queues(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ kfree(qdev->lrg_buf);
+ dma_free_coherent(&qdev->pdev->dev, qdev->lrg_buf_q_alloc_size,
+ qdev->lrg_buf_q_alloc_virt_addr,
+ qdev->lrg_buf_q_alloc_phy_addr);
+
+ qdev->lrg_buf_q_virt_addr = NULL;
+
+ dma_free_coherent(&qdev->pdev->dev, qdev->small_buf_q_alloc_size,
+ qdev->small_buf_q_alloc_virt_addr,
+ qdev->small_buf_q_alloc_phy_addr);
+
+ qdev->small_buf_q_virt_addr = NULL;
+
+ clear_bit(QL_ALLOC_BUFQS_DONE, &qdev->flags);
+}
+
+static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct bufq_addr_element *small_buf_q_entry;
+
+	/* Currently we allocate one chunk of memory and use it for all of the small buffers. */
+ qdev->small_buf_total_size =
+ (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_virt_addr =
+ dma_alloc_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ &qdev->small_buf_phy_addr, GFP_KERNEL);
+
+ if (qdev->small_buf_virt_addr == NULL) {
+ netdev_err(qdev->ndev, "Failed to get small buffer memory\n");
+ return -ENOMEM;
+ }
+
+ qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
+ qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
+
+ small_buf_q_entry = qdev->small_buf_q_virt_addr;
+
+ /* Initialize the small buffer queue. */
+ for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
+ small_buf_q_entry->addr_high =
+ cpu_to_le32(qdev->small_buf_phy_addr_high);
+ small_buf_q_entry->addr_low =
+ cpu_to_le32(qdev->small_buf_phy_addr_low +
+ (i * QL_SMALL_BUFFER_SIZE));
+ small_buf_q_entry++;
+ }
+ qdev->small_buf_index = 0;
+ set_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags);
+ return 0;
+}
+
+static void ql_free_small_buffers(struct ql3_adapter *qdev)
+{
+ if (!test_bit(QL_ALLOC_SMALL_BUF_DONE, &qdev->flags)) {
+ netdev_info(qdev->ndev, "Already done\n");
+ return;
+ }
+ if (qdev->small_buf_virt_addr != NULL) {
+ dma_free_coherent(&qdev->pdev->dev,
+ qdev->small_buf_total_size,
+ qdev->small_buf_virt_addr,
+ qdev->small_buf_phy_addr);
+
+ qdev->small_buf_virt_addr = NULL;
+ }
+}
+
+static void ql_free_large_buffers(struct ql3_adapter *qdev)
+{
+ int i = 0;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ if (lrg_buf_cb->skb) {
+ dev_kfree_skb(lrg_buf_cb->skb);
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(lrg_buf_cb, mapaddr),
+ dma_unmap_len(lrg_buf_cb, maplen),
+ DMA_FROM_DEVICE);
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+ } else {
+ break;
+ }
+ }
+}
+
+static void ql_init_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
+ buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
+ buf_addr_ele++;
+ }
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_skb_check = 0;
+}
+
+static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
+{
+ int i;
+ struct ql_rcv_buf_cb *lrg_buf_cb;
+ struct sk_buff *skb;
+ dma_addr_t map;
+ int err;
+
+ for (i = 0; i < qdev->num_large_buffers; i++) {
+ lrg_buf_cb = &qdev->lrg_buf[i];
+ memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
+
+ skb = netdev_alloc_skb(qdev->ndev,
+ qdev->lrg_buffer_len);
+ if (unlikely(!skb)) {
+ /* Better luck next round */
+ netdev_err(qdev->ndev,
+ "large buff alloc failed for %d bytes at index %d\n",
+ qdev->lrg_buffer_len * 2, i);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ } else {
+ lrg_buf_cb->index = i;
+ /*
+ * We save some space to copy the ethhdr from first
+ * buffer
+ */
+ skb_reserve(skb, QL_HEADER_SPACE);
+ map = dma_map_single(&qdev->pdev->dev, skb->data,
+ qdev->lrg_buffer_len - QL_HEADER_SPACE,
+ DMA_FROM_DEVICE);
+
+ err = dma_mapping_error(&qdev->pdev->dev, map);
+ if (err) {
+ netdev_err(qdev->ndev,
+ "PCI mapping failed with error: %d\n",
+ err);
+ dev_kfree_skb_irq(skb);
+ ql_free_large_buffers(qdev);
+ return -ENOMEM;
+ }
+
+ lrg_buf_cb->skb = skb;
+ dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+ dma_unmap_len_set(lrg_buf_cb, maplen,
+ qdev->lrg_buffer_len -
+ QL_HEADER_SPACE);
+ lrg_buf_cb->buf_phy_addr_low =
+ cpu_to_le32(LS_64BITS(map));
+ lrg_buf_cb->buf_phy_addr_high =
+ cpu_to_le32(MS_64BITS(map));
+ }
+ }
+ return 0;
+}
+
+static void ql_free_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+
+ tx_cb = &qdev->tx_buf[0];
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ kfree(tx_cb->oal);
+ tx_cb->oal = NULL;
+ tx_cb++;
+ }
+}
+
+static int ql_create_send_free_list(struct ql3_adapter *qdev)
+{
+ struct ql_tx_buf_cb *tx_cb;
+ int i;
+ struct ob_mac_iocb_req *req_q_curr = qdev->req_q_virt_addr;
+
+ /* Create free list of transmit buffers */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+
+ tx_cb = &qdev->tx_buf[i];
+ tx_cb->skb = NULL;
+ tx_cb->queue_entry = req_q_curr;
+ req_q_curr++;
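+		/* Space for the outbound address lists (OALs) chained off
+		 * the IOCB for scatter/gather sends.
+		 */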
+ tx_cb->oal = kmalloc(512, GFP_KERNEL);
+ if (tx_cb->oal == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
+{
+ if (qdev->ndev->mtu == NORMAL_MTU_SIZE) {
+ qdev->num_lbufq_entries = NUM_LBUFQ_ENTRIES;
+ qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
+ } else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
+ /*
+ * Bigger buffers, so less of them.
+ */
+ qdev->num_lbufq_entries = JUMBO_NUM_LBUFQ_ENTRIES;
+ qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
+ } else {
+ netdev_err(qdev->ndev, "Invalid mtu size: %d. Only %d and %d are accepted.\n",
+ qdev->ndev->mtu, NORMAL_MTU_SIZE, JUMBO_MTU_SIZE);
+ return -ENOMEM;
+ }
+ qdev->num_large_buffers =
+ qdev->num_lbufq_entries * QL_ADDR_ELE_PER_BUFQ_ENTRY;
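+	/* Pad each receive buffer for a VLAN ethernet header and the
+	 * reserved header copy space on top of the MTU-sized payload.
+	 */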
+ qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
+ qdev->max_frame_size =
+ (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
+
+ /*
+ * First allocate a page of shared memory and use it for shadow
+ * locations of Network Request Queue Consumer Address Register and
+ * Network Completion Queue Producer Index Register
+ */
+ qdev->shadow_reg_virt_addr =
+ dma_alloc_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ &qdev->shadow_reg_phy_addr, GFP_KERNEL);
+
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ qdev->preq_consumer_index = qdev->shadow_reg_virt_addr;
+ qdev->req_consumer_index_phy_addr_high =
+ MS_64BITS(qdev->shadow_reg_phy_addr);
+ qdev->req_consumer_index_phy_addr_low =
+ LS_64BITS(qdev->shadow_reg_phy_addr);
+
+ qdev->prsp_producer_index =
+ (__le32 *) (((u8 *) qdev->preq_consumer_index) + 8);
+ qdev->rsp_producer_index_phy_addr_high =
+ qdev->req_consumer_index_phy_addr_high;
+ qdev->rsp_producer_index_phy_addr_low =
+ qdev->req_consumer_index_phy_addr_low + 8;
+ } else {
+ netdev_err(qdev->ndev, "shadowReg Alloc failed\n");
+ return -ENOMEM;
+ }
+
+ if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_net_req_rsp_queues failed\n");
+ goto err_req_rsp;
+ }
+
+ if (ql_alloc_buffer_queues(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_buffer_queues failed\n");
+ goto err_buffer_queues;
+ }
+
+ if (ql_alloc_small_buffers(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_small_buffers failed\n");
+ goto err_small_buffers;
+ }
+
+ if (ql_alloc_large_buffers(qdev) != 0) {
+ netdev_err(qdev->ndev, "ql_alloc_large_buffers failed\n");
+ goto err_small_buffers;
+ }
+
+ /* Initialize the large buffer queue. */
+ ql_init_large_buffers(qdev);
+ if (ql_create_send_free_list(qdev))
+ goto err_free_list;
+
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ return 0;
+err_free_list:
+ ql_free_send_free_list(qdev);
+err_small_buffers:
+ ql_free_buffer_queues(qdev);
+err_buffer_queues:
+ ql_free_net_req_rsp_queues(qdev);
+err_req_rsp:
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+
+ return -ENOMEM;
+}
+
+static void ql_free_mem_resources(struct ql3_adapter *qdev)
+{
+ ql_free_send_free_list(qdev);
+ ql_free_large_buffers(qdev);
+ ql_free_small_buffers(qdev);
+ ql_free_buffer_queues(qdev);
+ ql_free_net_req_rsp_queues(qdev);
+ if (qdev->shadow_reg_virt_addr != NULL) {
+ dma_free_coherent(&qdev->pdev->dev, PAGE_SIZE,
+ qdev->shadow_reg_virt_addr,
+ qdev->shadow_reg_phy_addr);
+ qdev->shadow_reg_virt_addr = NULL;
+ }
+}
+
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_local_ram_registers __iomem *local_ram =
+ (void __iomem *)qdev->mem_map_registers;
+
+ if (ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 4))
+ return -1;
+
+ ql_write_page2_reg(qdev,
+ &local_ram->bufletSize, qdev->nvram_data.bufletSize);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->maxBufletCount,
+ qdev->nvram_data.bufletCount);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdLow,
+ (qdev->nvram_data.tcpWindowThreshold25 << 16) |
+ (qdev->nvram_data.tcpWindowThreshold0));
+
+ ql_write_page2_reg(qdev,
+ &local_ram->freeBufletThresholdHigh,
+ qdev->nvram_data.tcpWindowThreshold50);
+
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableBase,
+ (qdev->nvram_data.ipHashTableBaseHi << 16) |
+ qdev->nvram_data.ipHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->ipHashTableCount,
+ qdev->nvram_data.ipHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableBase,
+ (qdev->nvram_data.tcpHashTableBaseHi << 16) |
+ qdev->nvram_data.tcpHashTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->tcpHashTableCount,
+ qdev->nvram_data.tcpHashTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->ncbBase,
+ (qdev->nvram_data.ncbTableBaseHi << 16) |
+ qdev->nvram_data.ncbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxNcbCount,
+ qdev->nvram_data.ncbTableSize);
+ ql_write_page2_reg(qdev,
+ &local_ram->drbBase,
+ (qdev->nvram_data.drbTableBaseHi << 16) |
+ qdev->nvram_data.drbTableBaseLo);
+ ql_write_page2_reg(qdev,
+ &local_ram->maxDrbCount,
+ qdev->nvram_data.drbTableSize);
+ ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+ return 0;
+}
+
+static int ql_adapter_initialize(struct ql3_adapter *qdev)
+{
+ u32 value;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+	u32 __iomem *spir = &port_regs->CommonRegs.serialPortInterfaceReg;
+ struct ql3xxx_host_memory_registers __iomem *hmem_regs =
+ (void __iomem *)port_regs;
+ u32 delay = 10;
+ int status = 0;
+
+ if (ql_mii_setup(qdev))
+ return -1;
+
+	/* Bring the PHY out of reset */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_WE |
+ (ISP_SERIAL_PORT_IF_WE << 16)));
+ /* Give the PHY time to come out of reset. */
+ mdelay(100);
+ qdev->port_link_state = LS_DOWN;
+ netif_carrier_off(qdev->ndev);
+
+ /* V2 chip fix for ARS-39168. */
+ ql_write_common_reg(qdev, spir,
+ (ISP_SERIAL_PORT_IF_SDE |
+ (ISP_SERIAL_PORT_IF_SDE << 16)));
+
+ /* Request Queue Registers */
+ *((u32 *)(qdev->preq_consumer_index)) = 0;
+ atomic_set(&qdev->tx_count, NUM_REQ_Q_ENTRIES);
+ qdev->req_producer_index = 0;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrHigh,
+ qdev->req_consumer_index_phy_addr_high);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqConsumerIndexAddrLow,
+ qdev->req_consumer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrHigh,
+ MS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev,
+ &hmem_regs->reqBaseAddrLow,
+ LS_64BITS(qdev->req_q_phy_addr));
+ ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
+
+ /* Response Queue Registers */
+ *((__le16 *) (qdev->prsp_producer_index)) = 0;
+ qdev->rsp_consumer_index = 0;
+ qdev->rsp_current = qdev->rsp_q_virt_addr;
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrHigh,
+ qdev->rsp_producer_index_phy_addr_high);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspProducerIndexAddrLow,
+ qdev->rsp_producer_index_phy_addr_low);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrHigh,
+ MS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rspBaseAddrLow,
+ LS_64BITS(qdev->rsp_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
+
+ /* Large Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrHigh,
+ MS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQBaseAddrLow,
+ LS_64BITS(qdev->lrg_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeQLength,
+ qdev->num_lbufq_entries);
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxLargeBufferLength,
+ qdev->lrg_buffer_len);
+
+ /* Small Buffer Queue */
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrHigh,
+ MS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallQBaseAddrLow,
+ LS_64BITS(qdev->small_buf_q_phy_addr));
+
+ ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+ ql_write_page1_reg(qdev,
+ &hmem_regs->rxSmallBufferLength,
+ QL_SMALL_BUFFER_SIZE);
+
+ qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
+ qdev->small_buf_release_cnt = 8;
+ qdev->lrg_buf_q_producer_index = qdev->num_lbufq_entries - 1;
+ qdev->lrg_buf_release_cnt = 8;
+ qdev->lrg_buf_next_free = qdev->lrg_buf_q_virt_addr;
+ qdev->small_buf_index = 0;
+ qdev->lrg_buf_index = 0;
+ qdev->lrg_buf_free_count = 0;
+ qdev->lrg_buf_free_head = NULL;
+ qdev->lrg_buf_free_tail = NULL;
+
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxSmallQProducerIndex,
+ qdev->small_buf_q_producer_index);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ rxLargeQProducerIndex,
+ qdev->lrg_buf_q_producer_index);
+
+ /*
+ * Find out if the chip has already been initialized. If it has, then
+ * we skip some of the initialization.
+ */
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if ((value & PORT_STATUS_IC) == 0) {
+
+ /* Chip has not been configured yet, so let it rip. */
+ if (ql_init_misc_registers(qdev)) {
+ status = -1;
+ goto out;
+ }
+
+ value = qdev->nvram_data.tcpMaxWindowSize;
+ ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
+
+ value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
+
+ if (ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+ * 2) << 13)) {
+ status = -1;
+ goto out;
+ }
+ ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
+ ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
+ (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
+ 16) | (INTERNAL_CHIP_SD |
+ INTERNAL_CHIP_WE)));
+ ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
+ }
+
+ if (qdev->mac_index)
+ ql_write_page0_reg(qdev,
+ &port_regs->mac1MaxFrameLengthReg,
+ qdev->max_frame_size);
+ else
+ ql_write_page0_reg(qdev,
+ &port_regs->mac0MaxFrameLengthReg,
+ qdev->max_frame_size);
+
+ if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+ (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+ 2) << 7)) {
+ status = -1;
+ goto out;
+ }
+
+ PHY_Setup(qdev);
+ ql_init_scan_mode(qdev);
+ ql_get_phy_owner(qdev);
+
+ /* Load the MAC Configuration */
+
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[2] << 24)
+ | (qdev->ndev->dev_addr[3] << 16)
+ | (qdev->ndev->dev_addr[4] << 8)
+ | qdev->ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((qdev->ndev->dev_addr[0] << 8)
+ | qdev->ndev->dev_addr[1]));
+
+ /* Enable Primary MAC */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
+ MAC_ADDR_INDIRECT_PTR_REG_PE));
+
+ /* Clear Primary and Secondary IP addresses */
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ (qdev->mac_index << 2)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
+ ((IP_ADDR_INDEX_REG_MASK << 16) |
+ ((qdev->mac_index << 2) + 1)));
+ ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
+
+ ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+
+ /* Indicate Configuration Complete */
+ ql_write_page0_reg(qdev,
+ &port_regs->portControl,
+ ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
+
+ do {
+ value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ if (value & PORT_STATUS_IC)
+ break;
+ spin_unlock_irq(&qdev->hw_lock);
+ msleep(500);
+ spin_lock_irq(&qdev->hw_lock);
+ } while (--delay);
+
+ if (delay == 0) {
+ netdev_err(qdev->ndev, "Hw Initialization timeout\n");
+ status = -1;
+ goto out;
+ }
+
+ /* Enable Ethernet Function */
+ if (qdev->device_id == QL3032_DEVICE_ID) {
+ value =
+ (QL3032_PORT_CONTROL_EF | QL3032_PORT_CONTROL_KIE |
+ QL3032_PORT_CONTROL_EIv6 | QL3032_PORT_CONTROL_EIv4 |
+ QL3032_PORT_CONTROL_ET);
+ ql_write_page0_reg(qdev, &port_regs->functionControl,
+ ((value << 16) | value));
+ } else {
+ value =
+ (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
+ PORT_CONTROL_HH);
+ ql_write_page0_reg(qdev, &port_regs->portControl,
+ ((value << 16) | value));
+ }
+
+out:
+ return status;
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_adapter_reset(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ int status = 0;
+ u16 value;
+ int max_wait_time;
+
+ set_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_DONE, &qdev->flags);
+
+ /*
+ * Issue soft reset to chip.
+ */
+ netdev_printk(KERN_DEBUG, qdev->ndev, "Issue soft reset to chip\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus,
+ ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
+
+	/* Wait for the reset to complete; the loop below polls for up to 5 seconds. */
+	netdev_printk(KERN_DEBUG, qdev->ndev,
+		      "Waiting for reset to complete\n");
+
+ /* Wait until the firmware tells us the Soft Reset is done */
+ max_wait_time = 5;
+ do {
+ value =
+ ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0)
+ break;
+
+ mdelay(1000);
+ } while ((--max_wait_time));
+
+ /*
+ * Also, make sure that the Network Reset Interrupt bit has been
+ * cleared after the soft reset has taken place.
+ */
+ value =
+ ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "clearing RI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ }
+
+ if (max_wait_time == 0) {
+ /* Issue Force Soft Reset */
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_FSR << 16) |
+ ISP_CONTROL_FSR));
+ /*
+ * Wait until the firmware tells us the Force Soft Reset is
+ * done
+ */
+ max_wait_time = 5;
+ do {
+ value = ql_read_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus);
+ if ((value & ISP_CONTROL_FSR) == 0)
+ break;
+ mdelay(1000);
+ } while ((--max_wait_time));
+ }
+ if (max_wait_time == 0)
+ status = 1;
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ set_bit(QL_RESET_DONE, &qdev->flags);
+ return status;
+}
+
+static void ql_set_mac_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value, port_status;
+ u8 func_number;
+
+ /* Get the function number */
+ value =
+ ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
+ func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
+ port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
+ switch (value & ISP_CONTROL_FN_MASK) {
+ case ISP_CONTROL_FN0_NET:
+ qdev->mac_index = 0;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN0_MA_BITS_MASK;
+ qdev->PHYAddr = PORT0_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM0)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN1_NET:
+ qdev->mac_index = 1;
+ qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
+ qdev->mb_bit_mask = FN1_MA_BITS_MASK;
+ qdev->PHYAddr = PORT1_PHY_ADDRESS;
+ if (port_status & PORT_STATUS_SM1)
+ set_bit(QL_LINK_OPTICAL, &qdev->flags);
+ else
+ clear_bit(QL_LINK_OPTICAL, &qdev->flags);
+ break;
+
+ case ISP_CONTROL_FN0_SCSI:
+ case ISP_CONTROL_FN1_SCSI:
+ default:
+ netdev_printk(KERN_DEBUG, qdev->ndev,
+ "Invalid function number, ispControlStatus = 0x%x\n",
+ value);
+ break;
+ }
+ qdev->numPorts = qdev->nvram_data.version_and_numPorts >> 8;
+}
+
+static void ql_display_dev_info(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct pci_dev *pdev = qdev->pdev;
+
+ netdev_info(ndev,
+ "%s Adapter %d RevisionID %d found %s on PCI slot %d\n",
+ DRV_NAME, qdev->index, qdev->chip_rev_id,
+ qdev->device_id == QL3032_DEVICE_ID ? "QLA3032" : "QLA3022",
+ qdev->pci_slot);
+ netdev_info(ndev, "%s Interface\n",
+ test_bit(QL_LINK_OPTICAL, &qdev->flags) ? "OPTICAL" : "COPPER");
+
+ /*
+ * Print PCI bus width/type.
+ */
+ netdev_info(ndev, "Bus interface is %s %s\n",
+ ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
+ ((qdev->pci_x) ? "PCI-X" : "PCI"));
+
+ netdev_info(ndev, "mem IO base address adjusted = 0x%p\n",
+ qdev->mem_map_registers);
+ netdev_info(ndev, "Interrupt number = %d\n", pdev->irq);
+
+ netif_info(qdev, probe, ndev, "MAC address %pM\n", ndev->dev_addr);
+}
+
+static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
+{
+ struct net_device *ndev = qdev->ndev;
+ int retval = 0;
+
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ clear_bit(QL_ADAPTER_UP, &qdev->flags);
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ ql_disable_interrupts(qdev);
+
+ free_irq(qdev->pdev->irq, ndev);
+
+ if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ netdev_info(qdev->ndev, "calling pci_disable_msi()\n");
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+
+ del_timer_sync(&qdev->adapter_timer);
+
+ napi_disable(&qdev->napi);
+
+ if (do_reset) {
+ int soft_reset;
+ unsigned long hw_flags;
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (ql_wait_for_drvr_lock(qdev)) {
+ soft_reset = ql_adapter_reset(qdev);
+ if (soft_reset) {
+ netdev_err(ndev, "ql_adapter_reset(%d) FAILED!\n",
+ qdev->index);
+ }
+ netdev_err(ndev,
+ "Releasing driver lock via chip reset\n");
+ } else {
+ netdev_err(ndev,
+ "Could not acquire driver lock to do reset!\n");
+ retval = -1;
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ }
+ ql_free_mem_resources(qdev);
+ return retval;
+}
+
+static int ql_adapter_up(struct ql3_adapter *qdev)
+{
+ struct net_device *ndev = qdev->ndev;
+ int err;
+ unsigned long irq_flags = IRQF_SHARED;
+ unsigned long hw_flags;
+
+ if (ql_alloc_mem_resources(qdev)) {
+ netdev_err(ndev, "Unable to allocate buffers\n");
+ return -ENOMEM;
+ }
+
+ if (qdev->msi) {
+ if (pci_enable_msi(qdev->pdev)) {
+ netdev_err(ndev,
+ "User requested MSI, but MSI failed to initialize. Continuing without MSI.\n");
+ qdev->msi = 0;
+ } else {
+ netdev_info(ndev, "MSI Enabled...\n");
+ set_bit(QL_MSI_ENABLED, &qdev->flags);
+ irq_flags &= ~IRQF_SHARED;
+ }
+ }
+
+ err = request_irq(qdev->pdev->irq, ql3xxx_isr,
+ irq_flags, ndev->name, ndev);
+ if (err) {
+ netdev_err(ndev,
+ "Failed to reserve interrupt %d - already in use\n",
+ qdev->pdev->irq);
+ goto err_irq;
+ }
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+ if (!ql_wait_for_drvr_lock(qdev)) {
+ netdev_err(ndev, "Could not acquire driver lock\n");
+ err = -ENODEV;
+ goto err_lock;
+ }
+
+ err = ql_adapter_initialize(qdev);
+ if (err) {
+ netdev_err(ndev, "Unable to initialize adapter\n");
+ goto err_init;
+ }
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ set_bit(QL_ADAPTER_UP, &qdev->flags);
+
+ mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
+
+ napi_enable(&qdev->napi);
+ ql_enable_interrupts(qdev);
+ return 0;
+
+err_init:
+ ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
+err_lock:
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ free_irq(qdev->pdev->irq, ndev);
+err_irq:
+ if (qdev->msi && test_bit(QL_MSI_ENABLED, &qdev->flags)) {
+ netdev_info(ndev, "calling pci_disable_msi()\n");
+ clear_bit(QL_MSI_ENABLED, &qdev->flags);
+ pci_disable_msi(qdev->pdev);
+ }
+ return err;
+}
+
+static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
+{
+ if (ql_adapter_down(qdev, reset) || ql_adapter_up(qdev)) {
+ netdev_err(qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
+ rtnl_lock();
+ dev_close(qdev->ndev);
+ rtnl_unlock();
+ return -1;
+ }
+ return 0;
+}
+
+static int ql3xxx_close(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ /*
+ * Wait for device to recover from a reset.
+ * (Rarely happens, but possible.)
+ */
+ while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
+ msleep(50);
+
+ ql_adapter_down(qdev, QL_DO_RESET);
+ return 0;
+}
+
+static int ql3xxx_open(struct net_device *ndev)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ return ql_adapter_up(qdev);
+}
+
+static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ struct sockaddr *addr = p;
+ unsigned long hw_flags;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(ndev, addr->sa_data);
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ /* Program lower 32 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[2] << 24) | (ndev->
+ dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
+
+ /* Program top 16 bits of the MAC address */
+ ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
+ ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
+ ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
+ ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ return 0;
+}
+
+static void ql3xxx_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ netdev_err(ndev, "Resetting...\n");
+ /*
+ * Stop the queues, we've got a problem.
+ */
+ netif_stop_queue(ndev);
+
+ /*
+ * Wake up the worker to process this event.
+ */
+ queue_delayed_work(qdev->workqueue, &qdev->tx_timeout_work, 0);
+}
+
+static void ql_reset_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, reset_work.work);
+ struct net_device *ndev = qdev->ndev;
+ u32 value;
+ struct ql_tx_buf_cb *tx_cb;
+ int max_wait_time, i;
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ unsigned long hw_flags;
+
+ if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) ||
+ test_bit(QL_RESET_START, &qdev->flags)) {
+ clear_bit(QL_LINK_MASTER, &qdev->flags);
+
+ /*
+ * Loop through the active list and return the skb.
+ */
+ for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+ int j;
+ tx_cb = &qdev->tx_buf[i];
+ if (tx_cb->skb) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "Freeing lost SKB\n");
+ dma_unmap_single(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[0], mapaddr),
+ dma_unmap_len(&tx_cb->map[0], maplen),
+ DMA_TO_DEVICE);
+ for (j = 1; j < tx_cb->seg_count; j++) {
+ dma_unmap_page(&qdev->pdev->dev,
+ dma_unmap_addr(&tx_cb->map[j], mapaddr),
+ dma_unmap_len(&tx_cb->map[j], maplen),
+ DMA_TO_DEVICE);
+ }
+ dev_kfree_skb(tx_cb->skb);
+ tx_cb->skb = NULL;
+ }
+ }
+
+ netdev_err(ndev, "Clearing NRI after reset\n");
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ ql_write_common_reg(qdev,
+ &port_regs->CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
+ /*
+		 * Wait for the Soft Reset to complete.
+ */
+ max_wait_time = 10;
+ do {
+			value = ql_read_common_reg(qdev,
+						   &port_regs->CommonRegs.ispControlStatus);
+ if ((value & ISP_CONTROL_SR) == 0) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "reset completed\n");
+ break;
+ }
+
+ if (value & ISP_CONTROL_RI) {
+ netdev_printk(KERN_DEBUG, ndev,
+ "clearing NRI after reset\n");
+ ql_write_common_reg(qdev,
+ &port_regs->
+ CommonRegs.
+ ispControlStatus,
+ ((ISP_CONTROL_RI <<
+ 16) | ISP_CONTROL_RI));
+ }
+
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ ssleep(1);
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ } while (--max_wait_time);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+ if (value & ISP_CONTROL_SR) {
+
+ /*
+ * Set the reset flags and clear the board again.
+ * Nothing else to do...
+ */
+ netdev_err(ndev,
+ "Timed out waiting for reset to complete\n");
+ netdev_err(ndev, "Do a reset\n");
+ clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ clear_bit(QL_RESET_START, &qdev->flags);
+ ql_cycle_adapter(qdev, QL_DO_RESET);
+ return;
+ }
+
+ clear_bit(QL_RESET_ACTIVE, &qdev->flags);
+ clear_bit(QL_RESET_PER_SCSI, &qdev->flags);
+ clear_bit(QL_RESET_START, &qdev->flags);
+ ql_cycle_adapter(qdev, QL_NO_RESET);
+ }
+}
+
+static void ql_tx_timeout_work(struct work_struct *work)
+{
+ struct ql3_adapter *qdev =
+ container_of(work, struct ql3_adapter, tx_timeout_work.work);
+
+ ql_cycle_adapter(qdev, QL_DO_RESET);
+}
+
+static void ql_get_board_info(struct ql3_adapter *qdev)
+{
+ struct ql3xxx_port_registers __iomem *port_regs =
+ qdev->mem_map_registers;
+ u32 value;
+
+ value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
+
+ qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
+ if (value & PORT_STATUS_64)
+ qdev->pci_width = 64;
+ else
+ qdev->pci_width = 32;
+ if (value & PORT_STATUS_X)
+ qdev->pci_x = 1;
+ else
+ qdev->pci_x = 0;
+ qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
+}
+
+static void ql3xxx_timer(struct timer_list *t)
+{
+ struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
+ queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
+}
+
+static const struct net_device_ops ql3xxx_netdev_ops = {
+ .ndo_open = ql3xxx_open,
+ .ndo_start_xmit = ql3xxx_send,
+ .ndo_stop = ql3xxx_close,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = ql3xxx_set_mac_address,
+ .ndo_tx_timeout = ql3xxx_tx_timeout,
+};
+
+static int ql3xxx_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_entry)
+{
+ struct net_device *ndev = NULL;
+ struct ql3_adapter *qdev = NULL;
+ static int cards_found;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pr_err("%s cannot enable PCI device\n", pci_name(pdev));
+ goto err_out;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ pr_err("%s cannot obtain PCI resources\n", pci_name(pdev));
+ goto err_out_disable_pdev;
+ }
+
+ pci_set_master(pdev);
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ pr_err("%s no usable DMA configuration\n", pci_name(pdev));
+ goto err_out_free_regions;
+ }
+
+ ndev = alloc_etherdev(sizeof(struct ql3_adapter));
+ if (!ndev) {
+ err = -ENOMEM;
+ goto err_out_free_regions;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ pci_set_drvdata(pdev, ndev);
+
+ qdev = netdev_priv(ndev);
+ qdev->index = cards_found;
+ qdev->ndev = ndev;
+ qdev->pdev = pdev;
+ qdev->device_id = pci_entry->device;
+ qdev->port_link_state = LS_DOWN;
+ if (msi)
+ qdev->msi = 1;
+
+ qdev->msg_enable = netif_msg_init(debug, default_msg);
+
+ ndev->features |= NETIF_F_HIGHDMA;
+ if (qdev->device_id == QL3032_DEVICE_ID)
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+
+ qdev->mem_map_registers = pci_ioremap_bar(pdev, 1);
+ if (!qdev->mem_map_registers) {
+ pr_err("%s: cannot map device registers\n", pci_name(pdev));
+ err = -EIO;
+ goto err_out_free_ndev;
+ }
+
+ spin_lock_init(&qdev->adapter_lock);
+ spin_lock_init(&qdev->hw_lock);
+
+ /* Set driver entry points */
+ ndev->netdev_ops = &ql3xxx_netdev_ops;
+ ndev->ethtool_ops = &ql3xxx_ethtool_ops;
+ ndev->watchdog_timeo = 5 * HZ;
+
+ netif_napi_add(ndev, &qdev->napi, ql_poll);
+
+ ndev->irq = pdev->irq;
+
+ /* make sure the EEPROM is good */
+ if (ql_get_nvram_params(qdev)) {
+ pr_alert("%s: Adapter #%d, Invalid NVRAM parameters\n",
+ __func__, qdev->index);
+ err = -EIO;
+ goto err_out_iounmap;
+ }
+
+ ql_set_mac_info(qdev);
+
+ /* Validate and set parameters */
+ if (qdev->mac_index) {
+		ndev->mtu = qdev->nvram_data.macCfg_port1.etherMtu_mac;
+ ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn2.macAddress);
+ } else {
+		ndev->mtu = qdev->nvram_data.macCfg_port0.etherMtu_mac;
+ ql_set_mac_addr(ndev, qdev->nvram_data.funcCfg_fn0.macAddress);
+ }
+
+ ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
+
+ /* Record PCI bus information. */
+ ql_get_board_info(qdev);
+
+ /*
+ * Set the Maximum Memory Read Byte Count value. We do this to handle
+ * jumbo frames.
+ */
+ if (qdev->pci_x)
+ pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
+
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("%s: cannot register net device\n", pci_name(pdev));
+ goto err_out_iounmap;
+ }
+
+ /* we're going to reset, so assume we have no link for now */
+
+ netif_carrier_off(ndev);
+ netif_stop_queue(ndev);
+
+ qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ if (!qdev->workqueue) {
+ unregister_netdev(ndev);
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ INIT_DELAYED_WORK(&qdev->reset_work, ql_reset_work);
+ INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
+ INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
+
+ timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
+ qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
+
+ if (!cards_found) {
+ pr_alert("%s\n", DRV_STRING);
+ pr_alert("Driver name: %s, Version: %s\n",
+ DRV_NAME, DRV_VERSION);
+ }
+ ql_display_dev_info(ndev);
+
+ cards_found++;
+ return 0;
+
+err_out_iounmap:
+ iounmap(qdev->mem_map_registers);
+err_out_free_ndev:
+ free_netdev(ndev);
+err_out_free_regions:
+ pci_release_regions(pdev);
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+err_out:
+ return err;
+}
+
+static void ql3xxx_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql3_adapter *qdev = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+
+ ql_disable_interrupts(qdev);
+
+ if (qdev->workqueue) {
+ cancel_delayed_work(&qdev->reset_work);
+ cancel_delayed_work(&qdev->tx_timeout_work);
+ destroy_workqueue(qdev->workqueue);
+ qdev->workqueue = NULL;
+ }
+
+ iounmap(qdev->mem_map_registers);
+ pci_release_regions(pdev);
+ free_netdev(ndev);
+}
+
+static struct pci_driver ql3xxx_driver = {
+
+ .name = DRV_NAME,
+ .id_table = ql3xxx_pci_tbl,
+ .probe = ql3xxx_probe,
+ .remove = ql3xxx_remove,
+};
+
+module_pci_driver(ql3xxx_driver);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.h b/drivers/net/ethernet/qlogic/qla3xxx.h
new file mode 100644
index 000000000..fb4398303
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qla3xxx.h
@@ -0,0 +1,1188 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ */
+#ifndef _QLA3XXX_H_
+#define _QLA3XXX_H_
+
+/*
+ * IOCB Definitions...
+ */
+#pragma pack(1)
+
+#define OPCODE_OB_MAC_IOCB_FN0 0x01
+#define OPCODE_OB_MAC_IOCB_FN2 0x21
+
+#define OPCODE_IB_MAC_IOCB 0xF9
+#define OPCODE_IB_3032_MAC_IOCB 0x09
+#define OPCODE_IB_IP_IOCB 0xFA
+#define OPCODE_IB_3032_IP_IOCB 0x0A
+
+#define OPCODE_FUNC_ID_MASK 0x30
+#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
+
+#define FN0_MA_BITS_MASK 0x00
+#define FN1_MA_BITS_MASK 0x80
+
+struct ob_mac_iocb_req {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_REQ_MA 0xe0
+#define OB_MAC_IOCB_REQ_F 0x10
+#define OB_MAC_IOCB_REQ_X 0x08
+#define OB_MAC_IOCB_REQ_D 0x02
+#define OB_MAC_IOCB_REQ_I 0x01
+ u8 flags1;
+#define OB_3032MAC_IOCB_REQ_IC 0x04
+#define OB_3032MAC_IOCB_REQ_TC 0x02
+#define OB_3032MAC_IOCB_REQ_UC 0x01
+ u8 reserved0;
+
+ u32 transaction_id; /* opaque for hardware */
+ __le16 data_len;
+ u8 ip_hdr_off;
+ u8 ip_hdr_len;
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved3;
+ __le32 reserved4;
+};
+/*
+ * The following constants define control bits for buffer
+ * length fields for all IOCB's.
+ */
+#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
+#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
+#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
+#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
+
+struct ob_mac_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_P 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ u32 transaction_id; /* opaque for hardware */
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_mac_iocb_rsp {
+ u8 opcode;
+#define IB_MAC_IOCB_RSP_V 0x80
+ u8 flags;
+#define IB_MAC_IOCB_RSP_S 0x80
+#define IB_MAC_IOCB_RSP_H1 0x40
+#define IB_MAC_IOCB_RSP_H0 0x20
+#define IB_MAC_IOCB_RSP_B 0x10
+#define IB_MAC_IOCB_RSP_M 0x08
+#define IB_MAC_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le32 reserved;
+ __le32 ial_low;
+ __le32 ial_high;
+
+};
+
+struct ob_ip_iocb_req {
+ u8 opcode;
+ __le16 flags;
+#define OB_IP_IOCB_REQ_O 0x100
+#define OB_IP_IOCB_REQ_H 0x008
+#define OB_IP_IOCB_REQ_U 0x004
+#define OB_IP_IOCB_REQ_D 0x002
+#define OB_IP_IOCB_REQ_I 0x001
+
+ u8 reserved0;
+
+ __le32 transaction_id;
+ __le16 data_len;
+ __le16 reserved1;
+ __le32 hncb_ptr_low;
+ __le32 hncb_ptr_high;
+ __le32 buf_addr0_low;
+ __le32 buf_addr0_high;
+ __le32 buf_0_len;
+ __le32 buf_addr1_low;
+ __le32 buf_addr1_high;
+ __le32 buf_1_len;
+ __le32 buf_addr2_low;
+ __le32 buf_addr2_high;
+ __le32 buf_2_len;
+ __le32 reserved2;
+ __le32 reserved3;
+};
+
+/* defines for BufferLength fields above */
+#define OB_IP_IOCB_REQ_E 0x80000000
+#define OB_IP_IOCB_REQ_C 0x40000000
+#define OB_IP_IOCB_REQ_L 0x20000000
+#define OB_IP_IOCB_REQ_R 0x10000000
+
+struct ob_ip_iocb_rsp {
+ u8 opcode;
+ u8 flags;
+#define OB_MAC_IOCB_RSP_H 0x10
+#define OB_MAC_IOCB_RSP_E 0x08
+#define OB_MAC_IOCB_RSP_L 0x04
+#define OB_MAC_IOCB_RSP_S 0x02
+#define OB_MAC_IOCB_RSP_I 0x01
+
+ __le16 reserved0;
+ __le32 transaction_id;
+ __le32 reserved1;
+ __le32 reserved2;
+};
+
+struct ib_ip_iocb_rsp {
+ u8 opcode;
+#define IB_IP_IOCB_RSP_3032_V 0x80
+#define IB_IP_IOCB_RSP_3032_O 0x40
+#define IB_IP_IOCB_RSP_3032_I 0x20
+#define IB_IP_IOCB_RSP_3032_R 0x10
+ u8 flags;
+#define IB_IP_IOCB_RSP_S 0x80
+#define IB_IP_IOCB_RSP_H1 0x40
+#define IB_IP_IOCB_RSP_H0 0x20
+#define IB_IP_IOCB_RSP_B 0x10
+#define IB_IP_IOCB_RSP_M 0x08
+#define IB_IP_IOCB_RSP_MA 0x07
+
+ __le16 length;
+ __le16 checksum;
+#define IB_IP_IOCB_RSP_3032_ICE 0x01
+#define IB_IP_IOCB_RSP_3032_CE 0x02
+#define IB_IP_IOCB_RSP_3032_NUC 0x04
+#define IB_IP_IOCB_RSP_3032_UDP 0x08
+#define IB_IP_IOCB_RSP_3032_TCP 0x10
+#define IB_IP_IOCB_RSP_3032_IPE 0x20
+ __le16 reserved;
+#define IB_IP_IOCB_RSP_R 0x01
+ __le32 ial_low;
+ __le32 ial_high;
+};
+
+struct net_rsp_iocb {
+ u8 opcode;
+ u8 flags;
+ __le16 reserved0;
+ __le32 reserved[3];
+};
+#pragma pack()
+
+/*
+ * Register Definitions...
+ */
+#define PORT0_PHY_ADDRESS 0x1e00
+#define PORT1_PHY_ADDRESS 0x1f00
+
+#define ETHERNET_CRC_SIZE 4
+
+#define MII_SCAN_REGISTER 0x00000001
+
+#define PHY_ID_0_REG 2
+#define PHY_ID_1_REG 3
+
+#define PHY_OUI_1_MASK 0xfc00
+#define PHY_MODEL_MASK 0x03f0
+
+/* Address for the Agere Phy */
+#define MII_AGERE_ADDR_1 0x00001000
+#define MII_AGERE_ADDR_2 0x00001100
+
+/* 32-bit ispControlStatus */
+enum {
+ ISP_CONTROL_NP_MASK = 0x0003,
+ ISP_CONTROL_NP_PCSR = 0x0000,
+ ISP_CONTROL_NP_HMCR = 0x0001,
+ ISP_CONTROL_NP_LRAMCR = 0x0002,
+ ISP_CONTROL_NP_PSR = 0x0003,
+ ISP_CONTROL_RI = 0x0008,
+ ISP_CONTROL_CI = 0x0010,
+ ISP_CONTROL_PI = 0x0020,
+ ISP_CONTROL_IN = 0x0040,
+ ISP_CONTROL_BE = 0x0080,
+ ISP_CONTROL_FN_MASK = 0x0700,
+ ISP_CONTROL_FN0_NET = 0x0400,
+ ISP_CONTROL_FN0_SCSI = 0x0500,
+ ISP_CONTROL_FN1_NET = 0x0600,
+ ISP_CONTROL_FN1_SCSI = 0x0700,
+ ISP_CONTROL_LINK_DN_0 = 0x0800,
+ ISP_CONTROL_LINK_DN_1 = 0x1000,
+ ISP_CONTROL_FSR = 0x2000,
+ ISP_CONTROL_FE = 0x4000,
+ ISP_CONTROL_SR = 0x8000,
+};
+
+/* 32-bit ispInterruptMaskReg */
+enum {
+ ISP_IMR_ENABLE_INT = 0x0004,
+ ISP_IMR_DISABLE_RESET_INT = 0x0008,
+ ISP_IMR_DISABLE_CMPL_INT = 0x0010,
+ ISP_IMR_DISABLE_PROC_INT = 0x0020,
+};
+
+/* 32-bit serialPortInterfaceReg */
+enum {
+ ISP_SERIAL_PORT_IF_CLK = 0x0001,
+ ISP_SERIAL_PORT_IF_CS = 0x0002,
+ ISP_SERIAL_PORT_IF_D0 = 0x0004,
+ ISP_SERIAL_PORT_IF_DI = 0x0008,
+ ISP_NVRAM_MASK = (0x000F << 16),
+ ISP_SERIAL_PORT_IF_WE = 0x0010,
+ ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
+ ISP_SERIAL_PORT_IF_SCI = 0x0400,
+ ISP_SERIAL_PORT_IF_SC0 = 0x0800,
+ ISP_SERIAL_PORT_IF_SCE = 0x1000,
+ ISP_SERIAL_PORT_IF_SDI = 0x2000,
+ ISP_SERIAL_PORT_IF_SDO = 0x4000,
+ ISP_SERIAL_PORT_IF_SDE = 0x8000,
+ ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
+};
+
+/* semaphoreReg */
+enum {
+ QL_RESOURCE_MASK_BASE_CODE = 0x7,
+ QL_RESOURCE_BITS_BASE_CODE = 0x4,
+ QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
+ QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
+ QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
+ QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
+ QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
+ QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
+ QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
+ QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
+ QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
+ QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
+};
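
For reference, a worked instance of the semaphore bit arithmetic above (a standalone sketch, not part of the driver build): with QL_RESOURCE_BITS_BASE_CODE = 0x4 and QL_RESOURCE_MASK_BASE_CODE = 0x7, the driver semaphore works out to bits 0x8 with a mask of 0x000E0000. The claim pattern of writing mask | bits to semaphoreReg is an assumption here, since the locking helper itself is not shown in this hunk.

/* Standalone check of the semaphoreReg bit arithmetic (hypothetical harness). */
#include <stdio.h>

#define QL_RESOURCE_MASK_BASE_CODE 0x7
#define QL_RESOURCE_BITS_BASE_CODE 0x4
#define QL_DRVR_SEM_BITS (QL_RESOURCE_BITS_BASE_CODE << 1)
#define QL_DRVR_SEM_MASK (QL_RESOURCE_MASK_BASE_CODE << (1 + 16))

int main(void)
{
	/* Expect bits = 0x00000008 and mask = 0x000e0000. */
	printf("QL_DRVR_SEM_BITS = 0x%08x\n", QL_DRVR_SEM_BITS);
	printf("QL_DRVR_SEM_MASK = 0x%08x\n", QL_DRVR_SEM_MASK);
	/* A typical claim would write (mask | bits) = 0x000e0008 to semaphoreReg. */
	printf("claim value      = 0x%08x\n", QL_DRVR_SEM_MASK | QL_DRVR_SEM_BITS);
	return 0;
}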
+
+ /*
+ * QL3XXX memory-mapped registers
+ * QL3XXX has 4 "pages" of registers, each page occupying
+ * 256 bytes. Each page has a "common" area at the start and then
+ * page-specific registers after that.
+ */
+struct ql3xxx_common_registers {
+ u32 MB0; /* Offset 0x00 */
+ u32 MB1; /* Offset 0x04 */
+ u32 MB2; /* Offset 0x08 */
+ u32 MB3; /* Offset 0x0c */
+ u32 MB4; /* Offset 0x10 */
+ u32 MB5; /* Offset 0x14 */
+ u32 MB6; /* Offset 0x18 */
+ u32 MB7; /* Offset 0x1c */
+ u32 flashBiosAddr;
+ u32 flashBiosData;
+ u32 ispControlStatus;
+ u32 ispInterruptMaskReg;
+ u32 serialPortInterfaceReg;
+ u32 semaphoreReg;
+ u32 reqQProducerIndex;
+ u32 rspQConsumerIndex;
+
+ u32 rxLargeQProducerIndex;
+ u32 rxSmallQProducerIndex;
+ u32 arcMadiCommand;
+ u32 arcMadiData;
+};
+
+enum {
+ EXT_HW_CONFIG_SP_MASK = 0x0006,
+ EXT_HW_CONFIG_SP_NONE = 0x0000,
+ EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
+ EXT_HW_CONFIG_SP_ECC = 0x0004,
+ EXT_HW_CONFIG_SP_ECCx = 0x0006,
+ EXT_HW_CONFIG_SIZE_MASK = 0x0060,
+ EXT_HW_CONFIG_SIZE_128M = 0x0000,
+ EXT_HW_CONFIG_SIZE_256M = 0x0020,
+ EXT_HW_CONFIG_SIZE_512M = 0x0040,
+ EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
+ EXT_HW_CONFIG_PD = 0x0080,
+ EXT_HW_CONFIG_FW = 0x0200,
+ EXT_HW_CONFIG_US = 0x0400,
+ EXT_HW_CONFIG_DCS_MASK = 0x1800,
+ EXT_HW_CONFIG_DCS_9MA = 0x0000,
+ EXT_HW_CONFIG_DCS_15MA = 0x0800,
+ EXT_HW_CONFIG_DCS_18MA = 0x1000,
+ EXT_HW_CONFIG_DCS_24MA = 0x1800,
+ EXT_HW_CONFIG_DDS_MASK = 0x6000,
+ EXT_HW_CONFIG_DDS_9MA = 0x0000,
+ EXT_HW_CONFIG_DDS_15MA = 0x2000,
+ EXT_HW_CONFIG_DDS_18MA = 0x4000,
+ EXT_HW_CONFIG_DDS_24MA = 0x6000,
+};
+
+/* InternalChipConfig */
+enum {
+ INTERNAL_CHIP_DM = 0x0001,
+ INTERNAL_CHIP_SD = 0x0002,
+ INTERNAL_CHIP_RAP_MASK = 0x000C,
+ INTERNAL_CHIP_RAP_RR = 0x0000,
+ INTERNAL_CHIP_RAP_NRM = 0x0004,
+ INTERNAL_CHIP_RAP_ERM = 0x0008,
+ INTERNAL_CHIP_RAP_ERMx = 0x000C,
+ INTERNAL_CHIP_WE = 0x0010,
+ INTERNAL_CHIP_EF = 0x0020,
+ INTERNAL_CHIP_FR = 0x0040,
+ INTERNAL_CHIP_FW = 0x0080,
+ INTERNAL_CHIP_FI = 0x0100,
+ INTERNAL_CHIP_FT = 0x0200,
+};
+
+/* portControl */
+enum {
+ PORT_CONTROL_DS = 0x0001,
+ PORT_CONTROL_HH = 0x0002,
+ PORT_CONTROL_EI = 0x0004,
+ PORT_CONTROL_ET = 0x0008,
+ PORT_CONTROL_EF = 0x0010,
+ PORT_CONTROL_DRM = 0x0020,
+ PORT_CONTROL_RLB = 0x0040,
+ PORT_CONTROL_RCB = 0x0080,
+ PORT_CONTROL_MAC = 0x0100,
+ PORT_CONTROL_IPV = 0x0200,
+ PORT_CONTROL_IFP = 0x0400,
+ PORT_CONTROL_ITP = 0x0800,
+ PORT_CONTROL_FI = 0x1000,
+ PORT_CONTROL_DFP = 0x2000,
+ PORT_CONTROL_OI = 0x4000,
+ PORT_CONTROL_CC = 0x8000,
+};
+
+/* portStatus */
+enum {
+ PORT_STATUS_SM0 = 0x0001,
+ PORT_STATUS_SM1 = 0x0002,
+ PORT_STATUS_X = 0x0008,
+ PORT_STATUS_DL = 0x0080,
+ PORT_STATUS_IC = 0x0200,
+ PORT_STATUS_MRC = 0x0400,
+ PORT_STATUS_NL = 0x0800,
+ PORT_STATUS_REV_ID_MASK = 0x7000,
+ PORT_STATUS_REV_ID_1 = 0x1000,
+ PORT_STATUS_REV_ID_2 = 0x2000,
+ PORT_STATUS_REV_ID_3 = 0x3000,
+ PORT_STATUS_64 = 0x8000,
+ PORT_STATUS_UP0 = 0x10000,
+ PORT_STATUS_AC0 = 0x20000,
+ PORT_STATUS_AE0 = 0x40000,
+ PORT_STATUS_UP1 = 0x100000,
+ PORT_STATUS_AC1 = 0x200000,
+ PORT_STATUS_AE1 = 0x400000,
+ PORT_STATUS_F0_ENABLED = 0x1000000,
+ PORT_STATUS_F1_ENABLED = 0x2000000,
+ PORT_STATUS_F2_ENABLED = 0x4000000,
+ PORT_STATUS_F3_ENABLED = 0x8000000,
+};
+
+/* macAddrIndirectPtrReg */
+enum {
+ MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
+ MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
+ MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
+ MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
+ MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
+ MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
+ MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
+};
+
+/* macMIIMgmtControlReg */
+enum {
+ MAC_MII_CONTROL_RC = 0x0001,
+ MAC_MII_CONTROL_SC = 0x0002,
+ MAC_MII_CONTROL_AS = 0x0004,
+ MAC_MII_CONTROL_NP = 0x0008,
+ MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
+ MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
+ MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
+ MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
+ MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
+ MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
+ MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
+ MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
+ MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
+ MAC_MII_CONTROL_RM = 0x8000,
+};
+
+/* macMIIStatusReg */
+enum {
+ MAC_MII_STATUS_BSY = 0x0001,
+ MAC_MII_STATUS_SC = 0x0002,
+ MAC_MII_STATUS_NV = 0x0004,
+};
+
+enum {
+ MAC_CONFIG_REG_PE = 0x0001,
+ MAC_CONFIG_REG_TF = 0x0002,
+ MAC_CONFIG_REG_RF = 0x0004,
+ MAC_CONFIG_REG_FD = 0x0008,
+ MAC_CONFIG_REG_GM = 0x0010,
+ MAC_CONFIG_REG_LB = 0x0020,
+ MAC_CONFIG_REG_SR = 0x8000,
+};
+
+enum {
+ MAC_HALF_DUPLEX_REG_ED = 0x10000,
+ MAC_HALF_DUPLEX_REG_NB = 0x20000,
+ MAC_HALF_DUPLEX_REG_BNB = 0x40000,
+ MAC_HALF_DUPLEX_REG_ALT = 0x80000,
+};
+
+enum {
+ IP_ADDR_INDEX_REG_MASK = 0x000f,
+ IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
+ IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
+ IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
+ IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
+ IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
+ IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
+ IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
+ IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
+ IP_ADDR_INDEX_REG_6 = 0x0008,
+ IP_ADDR_INDEX_REG_OFFSET_MASK = 0x0030,
+ IP_ADDR_INDEX_REG_E = 0x0040,
+};
+enum {
+ QL3032_PORT_CONTROL_DS = 0x0001,
+ QL3032_PORT_CONTROL_HH = 0x0002,
+ QL3032_PORT_CONTROL_EIv6 = 0x0004,
+ QL3032_PORT_CONTROL_EIv4 = 0x0008,
+ QL3032_PORT_CONTROL_ET = 0x0010,
+ QL3032_PORT_CONTROL_EF = 0x0020,
+ QL3032_PORT_CONTROL_DRM = 0x0040,
+ QL3032_PORT_CONTROL_RLB = 0x0080,
+ QL3032_PORT_CONTROL_RCB = 0x0100,
+ QL3032_PORT_CONTROL_KIE = 0x0200,
+};
+
+enum {
+ PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
+ PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
+ PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
+ PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
+ PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
+ PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
+ PROBE_MUX_ADDR_REG_UP = 0x4000,
+ PROBE_MUX_ADDR_REG_RE = 0x8000,
+};
+
+enum {
+ STATISTICS_INDEX_REG_MASK = 0x01ff,
+ STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
+ STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
+ STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
+ STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
+ STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
+ STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
+ STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
+ STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
+ STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
+ STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
+ STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
+ STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
+ STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
+ STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
+ STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
+ STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
+ STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
+ STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
+ STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
+ STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
+ STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
+ STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
+ STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
+ STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
+ STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
+ STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
+ STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
+ STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
+ STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
+ STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
+ STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
+ STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
+ STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
+ STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
+};
+
+enum {
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
+ PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
+ PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
+ PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
+ PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
+ PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
+ PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
+ PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
+ PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
+ PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
+ PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
+ PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
+ PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
+ PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
+ PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
+ PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
+ PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
+ PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
+ PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
+ PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
+ PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
+ PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
+ PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
+ PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
+};
+
+/*
+ * port control and status page - page 0
+ */
+
+struct ql3xxx_port_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 ExternalHWConfig;
+ u32 InternalChipConfig;
+ u32 portControl;
+ u32 portStatus;
+ u32 macAddrIndirectPtrReg;
+ u32 macAddrDataReg;
+ u32 macMIIMgmtControlReg;
+ u32 macMIIMgmtAddrReg;
+ u32 macMIIMgmtDataReg;
+ u32 macMIIStatusReg;
+ u32 mac0ConfigReg;
+ u32 mac0IpgIfgReg;
+ u32 mac0HalfDuplexReg;
+ u32 mac0MaxFrameLengthReg;
+ u32 mac0PauseThresholdReg;
+ u32 mac1ConfigReg;
+ u32 mac1IpgIfgReg;
+ u32 mac1HalfDuplexReg;
+ u32 mac1MaxFrameLengthReg;
+ u32 mac1PauseThresholdReg;
+ u32 ipAddrIndexReg;
+ u32 ipAddrDataReg;
+ u32 ipReassemblyTimeout;
+ u32 tcpMaxWindow;
+ u32 currentTcpTimestamp[2];
+ u32 internalRamRWAddrReg;
+ u32 internalRamWDataReg;
+ u32 reclaimedBufferAddrRegLow;
+ u32 reclaimedBufferAddrRegHigh;
+ u32 tcpConfiguration;
+ u32 functionControl;
+ u32 fpgaRevID;
+ u32 localRamAddr;
+ u32 localRamDataAutoIncr;
+ u32 localRamDataNonIncr;
+ u32 gpOutput;
+ u32 gpInput;
+ u32 probeMuxAddr;
+ u32 probeMuxData;
+ u32 statisticsIndexReg;
+ u32 statisticsReadDataRegAutoIncr;
+ u32 statisticsReadDataRegNoIncr;
+ u32 PortFatalErrStatus;
+};
+
+/*
+ * port host memory config page - page 1
+ */
+struct ql3xxx_host_memory_registers {
+ struct ql3xxx_common_registers CommonRegs;
+
+ u32 reserved[12];
+
+ /* Network Request Queue */
+ u32 reqConsumerIndex;
+ u32 reqConsumerIndexAddrLow;
+ u32 reqConsumerIndexAddrHigh;
+ u32 reqBaseAddrLow;
+ u32 reqBaseAddrHigh;
+ u32 reqLength;
+
+ /* Network Completion Queue */
+ u32 rspProducerIndex;
+ u32 rspProducerIndexAddrLow;
+ u32 rspProducerIndexAddrHigh;
+ u32 rspBaseAddrLow;
+ u32 rspBaseAddrHigh;
+ u32 rspLength;
+
+ /* RX Large Buffer Queue */
+ u32 rxLargeQConsumerIndex;
+ u32 rxLargeQBaseAddrLow;
+ u32 rxLargeQBaseAddrHigh;
+ u32 rxLargeQLength;
+ u32 rxLargeBufferLength;
+
+ /* RX Small Buffer Queue */
+ u32 rxSmallQConsumerIndex;
+ u32 rxSmallQBaseAddrLow;
+ u32 rxSmallQBaseAddrHigh;
+ u32 rxSmallQLength;
+ u32 rxSmallBufferLength;
+
+};
+
+/*
+ * port local RAM page - page 2
+ */
+struct ql3xxx_local_ram_registers {
+ struct ql3xxx_common_registers CommonRegs;
+ u32 bufletSize;
+ u32 maxBufletCount;
+ u32 currentBufletCount;
+ u32 reserved;
+ u32 freeBufletThresholdLow;
+ u32 freeBufletThresholdHigh;
+ u32 ipHashTableBase;
+ u32 ipHashTableCount;
+ u32 tcpHashTableBase;
+ u32 tcpHashTableCount;
+ u32 ncbBase;
+ u32 maxNcbCount;
+ u32 currentNcbCount;
+ u32 drbBase;
+ u32 maxDrbCount;
+ u32 currentDrbCount;
+};
+
+/*
+ * Macros to split a 64-bit quantity (e.g. a DMA address) into 32-bit halves
+ */
+
+#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x))
+#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) )
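
The two macros above return the low and high 32 bits of a 64-bit value; the `>>16>>16` in MS_64BITS performs the 32-bit shift in two steps. A minimal standalone check, using a made-up sample value rather than a real DMA mapping:

/* Standalone check of the 64-bit split macros (hypothetical harness). */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x))
#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16))

int main(void)
{
	u64 dma_addr = 0x1122334455667788ULL; /* sample value, not a real mapping */

	/* Expect low = 0x55667788, high = 0x11223344. */
	printf("low  = 0x%08x\n", LS_64BITS(dma_addr));
	printf("high = 0x%08x\n", MS_64BITS(dma_addr));
	return 0;
}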
+
+/*
+ * I/O register
+ */
+
+enum {
+ CONTROL_REG = 0,
+ STATUS_REG = 1,
+ PHY_STAT_LINK_UP = 0x0004,
+ PHY_CTRL_LOOPBACK = 0x4000,
+
+ PETBI_CONTROL_REG = 0x00,
+ PETBI_CTRL_ALL_PARAMS = 0x7140,
+ PETBI_CTRL_SOFT_RESET = 0x8000,
+ PETBI_CTRL_AUTO_NEG = 0x1000,
+ PETBI_CTRL_RESTART_NEG = 0x0200,
+ PETBI_CTRL_FULL_DUPLEX = 0x0100,
+ PETBI_CTRL_SPEED_1000 = 0x0040,
+
+ PETBI_STATUS_REG = 0x01,
+ PETBI_STAT_NEG_DONE = 0x0020,
+ PETBI_STAT_LINK_UP = 0x0004,
+
+ PETBI_NEG_ADVER = 0x04,
+ PETBI_NEG_PAUSE = 0x0080,
+ PETBI_NEG_PAUSE_MASK = 0x0180,
+ PETBI_NEG_DUPLEX = 0x0020,
+ PETBI_NEG_DUPLEX_MASK = 0x0060,
+
+ PETBI_NEG_PARTNER = 0x05,
+ PETBI_NEG_ERROR_MASK = 0x3000,
+
+ PETBI_EXPANSION_REG = 0x06,
+ PETBI_EXP_PAGE_RX = 0x0002,
+
+ PHY_GIG_CONTROL = 9,
+ PHY_GIG_ENABLE_MAN = 0x1000, /* Enable Master/Slave Manual Config*/
+ PHY_GIG_SET_MASTER = 0x0800, /* Set Master (slave if clear)*/
+ PHY_GIG_ALL_PARAMS = 0x0300,
+ PHY_GIG_ADV_1000F = 0x0200,
+ PHY_GIG_ADV_1000H = 0x0100,
+
+ PHY_NEG_ADVER = 4,
+ PHY_NEG_ALL_PARAMS = 0x0fe0,
+ PHY_NEG_ASY_PAUSE = 0x0800,
+ PHY_NEG_SYM_PAUSE = 0x0400,
+ PHY_NEG_ADV_SPEED = 0x01e0,
+ PHY_NEG_ADV_100F = 0x0100,
+ PHY_NEG_ADV_100H = 0x0080,
+ PHY_NEG_ADV_10F = 0x0040,
+ PHY_NEG_ADV_10H = 0x0020,
+
+ PETBI_TBI_CTRL = 0x11,
+ PETBI_TBI_RESET = 0x8000,
+ PETBI_TBI_AUTO_SENSE = 0x0100,
+ PETBI_TBI_SERDES_MODE = 0x0010,
+ PETBI_TBI_SERDES_WRAP = 0x0002,
+
+ AUX_CONTROL_STATUS = 0x1c,
+ PHY_AUX_NEG_DONE = 0x8000,
+ PHY_NEG_PARTNER = 5,
+ PHY_AUX_DUPLEX_STAT = 0x0020,
+ PHY_AUX_SPEED_STAT = 0x0018,
+ PHY_AUX_NO_HW_STRAP = 0x0004,
+ PHY_AUX_RESET_STICK = 0x0002,
+ PHY_NEG_PAUSE = 0x0400,
+ PHY_CTRL_SOFT_RESET = 0x8000,
+ PHY_CTRL_AUTO_NEG = 0x1000,
+ PHY_CTRL_RESTART_NEG = 0x0200,
+};
+enum {
+/* FM93C56A serial EEPROM definitions */
+ FM93C56A_START = 0x1,
+/* Commands */
+ FM93C56A_READ = 0x2,
+ FM93C56A_WEN = 0x0,
+ FM93C56A_WRITE = 0x1,
+ FM93C56A_WRITE_ALL = 0x0,
+ FM93C56A_WDS = 0x0,
+ FM93C56A_ERASE = 0x3,
+ FM93C56A_ERASE_ALL = 0x0,
+/* Command Extensions */
+ FM93C56A_WEN_EXT = 0x3,
+ FM93C56A_WRITE_ALL_EXT = 0x1,
+ FM93C56A_WDS_EXT = 0x0,
+ FM93C56A_ERASE_ALL_EXT = 0x2,
+/* Special Bits */
+ FM93C56A_READ_DUMMY_BITS = 1,
+ FM93C56A_READY = 0,
+ FM93C56A_BUSY = 1,
+ FM93C56A_CMD_BITS = 2,
+/* EEPROM device sizes */
+ FM93C56A_SIZE_8 = 0x100,
+ FM93C56A_SIZE_16 = 0x80,
+ FM93C66A_SIZE_8 = 0x200,
+ FM93C66A_SIZE_16 = 0x100,
+ FM93C86A_SIZE_16 = 0x400,
+/* Address Bits */
+ FM93C56A_NO_ADDR_BITS_16 = 8,
+ FM93C56A_NO_ADDR_BITS_8 = 9,
+ FM93C86A_NO_ADDR_BITS_16 = 10,
+/* Data Bits */
+ FM93C56A_DATA_BITS_16 = 16,
+ FM93C56A_DATA_BITS_8 = 8,
+};
+enum {
+/* Auburn Bits */
+ AUBURN_EEPROM_DI = 0x8,
+ AUBURN_EEPROM_DI_0 = 0x0,
+ AUBURN_EEPROM_DI_1 = 0x8,
+ AUBURN_EEPROM_DO = 0x4,
+ AUBURN_EEPROM_DO_0 = 0x0,
+ AUBURN_EEPROM_DO_1 = 0x4,
+ AUBURN_EEPROM_CS = 0x2,
+ AUBURN_EEPROM_CS_0 = 0x0,
+ AUBURN_EEPROM_CS_1 = 0x2,
+ AUBURN_EEPROM_CLK_RISE = 0x1,
+ AUBURN_EEPROM_CLK_FALL = 0x0,
+};
+enum {EEPROM_SIZE = FM93C86A_SIZE_16,
+ EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
+ EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
+};
+
+/*
+ * MAC Config data structure
+ */
+struct eeprom_port_cfg {
+ u16 etherMtu_mac;
+ u16 pauseThreshold_mac;
+ u16 resumeThreshold_mac;
+ u16 portConfiguration;
+#define PORT_CONFIG_DEFAULT 0xf700
+#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
+#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
+#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
+#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
+#define PORT_CONFIG_1000MB_SPEED 0x0400
+#define PORT_CONFIG_100MB_SPEED 0x0200
+#define PORT_CONFIG_10MB_SPEED 0x0100
+#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
+ u16 reserved[12];
+
+};
+
+/*
+ * BIOS data structure
+ */
+struct eeprom_bios_cfg {
+ u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
+
+ u8 bootID0:7, boodID0Valid:1;
+ u8 bootLun0[8];
+
+ u8 bootID1:7, boodID1Valid:1;
+ u8 bootLun1[8];
+
+ u16 MaxLunsTrgt;
+ u8 reserved[10];
+};
+
+/*
+ * Function Specific Data structure
+ */
+struct eeprom_function_cfg {
+ u8 reserved[30];
+ u16 macAddress[3];
+ u16 macAddressSecondary[3];
+
+ u16 subsysVendorId;
+ u16 subsysDeviceId;
+};
+
+/*
+ * EEPROM format
+ */
+struct eeprom_data {
+ u8 asicId[4];
+ u16 version_and_numPorts; /* together to avoid endianness crap */
+ u16 boardId;
+
+#define EEPROM_BOARDID_STR_SIZE 16
+#define EEPROM_SERIAL_NUM_SIZE 16
+
+ u8 boardIdStr[16];
+ u8 serialNumber[16];
+ u16 extHwConfig;
+ struct eeprom_port_cfg macCfg_port0;
+ struct eeprom_port_cfg macCfg_port1;
+ u16 bufletSize;
+ u16 bufletCount;
+ u16 tcpWindowThreshold50;
+ u16 tcpWindowThreshold25;
+ u16 tcpWindowThreshold0;
+ u16 ipHashTableBaseHi;
+ u16 ipHashTableBaseLo;
+ u16 ipHashTableSize;
+ u16 tcpHashTableBaseHi;
+ u16 tcpHashTableBaseLo;
+ u16 tcpHashTableSize;
+ u16 ncbTableBaseHi;
+ u16 ncbTableBaseLo;
+ u16 ncbTableSize;
+ u16 drbTableBaseHi;
+ u16 drbTableBaseLo;
+ u16 drbTableSize;
+ u16 reserved_142[4];
+ u16 ipReassemblyTimeout;
+ u16 tcpMaxWindowSize;
+ u16 ipSecurity;
+#define IPSEC_CONFIG_PRESENT 0x0001
+ u8 reserved_156[294];
+ u16 qDebug[8];
+ struct eeprom_function_cfg funcCfg_fn0;
+ u16 reserved_510;
+ u8 oemSpace[432];
+ struct eeprom_bios_cfg biosCfg_fn1;
+ struct eeprom_function_cfg funcCfg_fn1;
+ u16 reserved_1022;
+ u8 reserved_1024[464];
+ struct eeprom_function_cfg funcCfg_fn2;
+ u16 reserved_1534;
+ u8 reserved_1536[432];
+ struct eeprom_bios_cfg biosCfg_fn3;
+ struct eeprom_function_cfg funcCfg_fn3;
+ u16 checksum;
+};
+
+/*
+ * General definitions...
+ */
+
+/*
+ * Below are a number of compiler switches for controlling driver behavior.
+ * Some are not supported under certain conditions and are noted as such.
+ */
+
+#define QL3XXX_VENDOR_ID 0x1077
+#define QL3022_DEVICE_ID 0x3022
+#define QL3032_DEVICE_ID 0x3032
+
+/* MTU & Frame Size stuff */
+#define NORMAL_MTU_SIZE ETH_DATA_LEN
+#define JUMBO_MTU_SIZE 9000
+#define VLAN_ID_LEN 2
+
+/* Request Queue Related Definitions */
+#define NUM_REQ_Q_ENTRIES 256 /* so that 256 * 64 = 16384 */
+
+/* Response Queue Related Definitions */
+#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
+
+/* Transmit and Receive Buffers */
+#define NUM_LBUFQ_ENTRIES 128
+#define JUMBO_NUM_LBUFQ_ENTRIES 32
+#define NUM_SBUFQ_ENTRIES 64
+#define QL_SMALL_BUFFER_SIZE 32
+#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
+(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
+/* Each send has at least one control block. This is how many we keep. */
+#define NUM_SMALL_BUFFERS NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
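
For reference, a worked instance of the arithmetic behind QL_ADDR_ELE_PER_BUFQ_ENTRY and NUM_SMALL_BUFFERS (a sketch using local stand-ins for the two structures declared further down in this header): lrg_buf_q_entry carries eight low/high address pairs (64 bytes) and bufq_addr_element carries one pair (8 bytes), so each buffer-queue entry holds 8 address elements and NUM_SMALL_BUFFERS comes out to 64 * 8 = 512.

/* Standalone check of the small-buffer sizing math (hypothetical harness). */
#include <stdio.h>
#include <stdint.h>

/* Local stand-ins mirroring the structures declared later in this header. */
struct bufq_addr_element { uint32_t addr_low, addr_high; };     /* 8 bytes  */
struct lrg_buf_q_entry { struct bufq_addr_element addr[8]; };   /* 64 bytes */

#define NUM_SBUFQ_ENTRIES 64
#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
	(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
#define NUM_SMALL_BUFFERS (NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)

int main(void)
{
	/* Expect 8 address elements per queue entry and 512 small buffers. */
	printf("elements per entry = %zu\n", QL_ADDR_ELE_PER_BUFQ_ENTRY);
	printf("small buffers      = %zu\n", (size_t)NUM_SMALL_BUFFERS);
	return 0;
}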
+
+#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
+/*
+ * Large & Small Buffers for Receives
+ */
+struct lrg_buf_q_entry {
+
+ __le32 addr0_lower;
+#define IAL_LAST_ENTRY 0x00000001
+#define IAL_CONT_ENTRY 0x00000002
+#define IAL_FLAG_MASK 0x00000003
+ __le32 addr0_upper;
+ __le32 addr1_lower;
+ __le32 addr1_upper;
+ __le32 addr2_lower;
+ __le32 addr2_upper;
+ __le32 addr3_lower;
+ __le32 addr3_upper;
+ __le32 addr4_lower;
+ __le32 addr4_upper;
+ __le32 addr5_lower;
+ __le32 addr5_upper;
+ __le32 addr6_lower;
+ __le32 addr6_upper;
+ __le32 addr7_lower;
+ __le32 addr7_upper;
+
+};
+
+struct bufq_addr_element {
+ __le32 addr_low;
+ __le32 addr_high;
+};
+
+#define QL_NO_RESET 0
+#define QL_DO_RESET 1
+
+enum link_state_t {
+ LS_UNKNOWN = 0,
+ LS_DOWN,
+ LS_DEGRADE,
+ LS_RECOVER,
+ LS_UP,
+};
+
+struct ql_rcv_buf_cb {
+ struct ql_rcv_buf_cb *next;
+ struct sk_buff *skb;
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+ __le32 buf_phy_addr_low;
+ __le32 buf_phy_addr_high;
+ int index;
+};
+
+/*
+ * Original IOCB has 3 sg entries:
+ * first points to skb-data area
+ * second points to first frag
+ * third points to next oal.
+ * OAL has 5 entries:
+ * 1 thru 4 point to frags
+ * fifth points to next oal.
+ */
+#define MAX_OAL_CNT ((MAX_SKB_FRAGS-1)/4 + 1)
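
A quick check of the OAL count formula above, assuming the common MAX_SKB_FRAGS value of 17 (the real value depends on the kernel configuration, so treat the number below as illustrative only):

/* Standalone check of the OAL count formula (hypothetical harness). */
#include <stdio.h>

#define MAX_SKB_FRAGS 17 /* assumed; the real value is kernel-config dependent */
#define MAX_OAL_CNT ((MAX_SKB_FRAGS - 1) / 4 + 1)

int main(void)
{
	/* With 17 fragments: (17 - 1) / 4 + 1 = 5 OALs per send. */
	printf("MAX_OAL_CNT = %d\n", MAX_OAL_CNT);
	return 0;
}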
+
+struct oal_entry {
+ __le32 dma_lo;
+ __le32 dma_hi;
+ __le32 len;
+#define OAL_LAST_ENTRY 0x80000000 /* Last valid buffer in list. */
+#define OAL_CONT_ENTRY 0x40000000 /* points to an OAL. (continuation) */
+};
+
+struct oal {
+ struct oal_entry oal_entry[5];
+};
+
+struct map_list {
+ DEFINE_DMA_UNMAP_ADDR(mapaddr);
+ DEFINE_DMA_UNMAP_LEN(maplen);
+};
+
+struct ql_tx_buf_cb {
+ struct sk_buff *skb;
+ struct ob_mac_iocb_req *queue_entry;
+ int seg_count;
+ struct oal *oal;
+ struct map_list map[MAX_SKB_FRAGS+1];
+};
+
+/* definitions for type field */
+#define QL_BUF_TYPE_MACIOCB 0x01
+#define QL_BUF_TYPE_IPIOCB 0x02
+#define QL_BUF_TYPE_TCPIOCB 0x03
+
+/* qdev->flags definitions. */
+enum { QL_RESET_DONE = 1, /* Reset finished. */
+ QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
+ QL_RESET_START = 3, /* Please reset the chip. */
+ QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
+ QL_TX_TIMEOUT = 5, /* Timeout in progress. */
+ QL_LINK_MASTER = 6, /* This driver controls the link. */
+ QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
+ QL_THREAD_UP = 8, /* This flag is available. */
+ QL_LINK_UP = 9, /* Link Status. */
+ QL_ALLOC_REQ_RSP_Q_DONE = 10,
+ QL_ALLOC_BUFQS_DONE = 11,
+ QL_ALLOC_SMALL_BUF_DONE = 12,
+ QL_LINK_OPTICAL = 13,
+ QL_MSI_ENABLED = 14,
+};
+
+/*
+ * ql3_adapter - The main Adapter structure definition.
+ * This structure has all fields relevant to the hardware.
+ */
+
+struct ql3_adapter {
+ u32 reserved_00;
+ unsigned long flags;
+
+ /* PCI Configuration information for this device */
+ struct pci_dev *pdev;
+ struct net_device *ndev; /* Parent NET device */
+
+ struct napi_struct napi;
+
+ /* Hardware information */
+ u8 chip_rev_id;
+ u8 pci_slot;
+ u8 pci_width;
+ u8 pci_x;
+ u32 msi;
+ int index;
+ struct timer_list adapter_timer; /* timer used for various functions */
+
+ spinlock_t adapter_lock;
+ spinlock_t hw_lock;
+
+ /* PCI Bus Relative Register Addresses */
+ u8 __iomem *mmap_virt_base; /* stores return value from ioremap() */
+ struct ql3xxx_port_registers __iomem *mem_map_registers;
+ u32 current_page; /* tracks current register page */
+
+ u32 msg_enable;
+ u8 reserved_01[2];
+ u8 reserved_02[2];
+
+ /* Page for Shadow Registers */
+ void *shadow_reg_virt_addr;
+ dma_addr_t shadow_reg_phy_addr;
+
+ /* Net Request Queue */
+ u32 req_q_size;
+ u32 reserved_03;
+ struct ob_mac_iocb_req *req_q_virt_addr;
+ dma_addr_t req_q_phy_addr;
+ u16 req_producer_index;
+ u16 reserved_04;
+ u16 *preq_consumer_index;
+ u32 req_consumer_index_phy_addr_high;
+ u32 req_consumer_index_phy_addr_low;
+ atomic_t tx_count;
+ struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
+
+ /* Net Response Queue */
+ u32 rsp_q_size;
+ u32 eeprom_cmd_data;
+ struct net_rsp_iocb *rsp_q_virt_addr;
+ dma_addr_t rsp_q_phy_addr;
+ struct net_rsp_iocb *rsp_current;
+ u16 rsp_consumer_index;
+ u16 reserved_06;
+ volatile __le32 *prsp_producer_index;
+ u32 rsp_producer_index_phy_addr_high;
+ u32 rsp_producer_index_phy_addr_low;
+
+ /* Large Buffer Queue */
+ u32 lrg_buf_q_alloc_size;
+ u32 lrg_buf_q_size;
+ void *lrg_buf_q_alloc_virt_addr;
+ void *lrg_buf_q_virt_addr;
+ dma_addr_t lrg_buf_q_alloc_phy_addr;
+ dma_addr_t lrg_buf_q_phy_addr;
+ u32 lrg_buf_q_producer_index;
+ u32 lrg_buf_release_cnt;
+ struct bufq_addr_element *lrg_buf_next_free;
+ u32 num_large_buffers;
+ u32 num_lbufq_entries;
+
+ /* Large (Receive) Buffers */
+ struct ql_rcv_buf_cb *lrg_buf;
+ struct ql_rcv_buf_cb *lrg_buf_free_head;
+ struct ql_rcv_buf_cb *lrg_buf_free_tail;
+ u32 lrg_buf_free_count;
+ u32 lrg_buffer_len;
+ u32 lrg_buf_index;
+ u32 lrg_buf_skb_check;
+
+ /* Small Buffer Queue */
+ u32 small_buf_q_alloc_size;
+ u32 small_buf_q_size;
+ u32 small_buf_q_producer_index;
+ void *small_buf_q_alloc_virt_addr;
+ void *small_buf_q_virt_addr;
+ dma_addr_t small_buf_q_alloc_phy_addr;
+ dma_addr_t small_buf_q_phy_addr;
+ u32 small_buf_index;
+
+ /* Small (Receive) Buffers */
+ void *small_buf_virt_addr;
+ dma_addr_t small_buf_phy_addr;
+ u32 small_buf_phy_addr_low;
+ u32 small_buf_phy_addr_high;
+ u32 small_buf_release_cnt;
+ u32 small_buf_total_size;
+
+ struct eeprom_data nvram_data;
+ u32 port_link_state;
+
+ /* 4022 specific */
+ u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
+ u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
+ u32 mac_ob_opcode; /* Opcode to use on mac transmission */
+ u32 mb_bit_mask; /* MA Bits mask to use on transmission */
+ u32 numPorts;
+ struct workqueue_struct *workqueue;
+ struct delayed_work reset_work;
+ struct delayed_work tx_timeout_work;
+ struct delayed_work link_state_work;
+ u32 max_frame_size;
+ u32 device_id;
+ u16 phyType;
+};
+
+#endif /* _QLA3XXX_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
new file mode 100644
index 000000000..dbaeab344
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
+ qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
+ qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
+ qlcnic_sriov_common.o
+
+qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
new file mode 100644
index 000000000..b25102fde
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -0,0 +1,2418 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+
+#include "qlcnic_hdr.h"
+#include "qlcnic_hw.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 66
+#define QLCNIC_LINUX_VERSIONID "5.3.66"
+#define QLCNIC_DRV_IDC_VER 0x01
+#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
+ (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
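
To illustrate the two encodings above (a standalone sketch with a made-up image version word): a flash image word of 0x00420305 carries major 5, minor 3, build 66, which QLCNIC_DECODE_VERSION turns into the driver's own (major << 24 | minor << 16 | build) form.

/* Standalone check of the firmware version (re)encoding (hypothetical harness). */
#include <stdio.h>
#include <stdint.h>

#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
#define _minor(v) (((v) >> 16) & 0xff)
#define _build(v) ((v) & 0xffff)
#define QLCNIC_DECODE_VERSION(v) \
	QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))

int main(void)
{
	uint32_t image_word = 0x00420305; /* sample: major 5, minor 3, build 66 */
	uint32_t ver = QLCNIC_DECODE_VERSION(image_word);

	/* Expect 5.3.66, i.e. ver == 0x05030042. */
	printf("ver = 0x%08x -> %u.%u.%u\n", ver,
	       (unsigned)_major(ver), (unsigned)_minor(ver), (unsigned)_build(ver));
	return 0;
}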
+
+#define QLCNIC_MIN_FW_VERSION QLCNIC_VERSION_CODE(4, 4, 2)
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+#define QLCNIC_P3P_C0 0x58
+
+#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3P_MAX_MTU (9600)
+#define P3P_MIN_MTU (68)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3P_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3P_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
+#define MAX_TSO_HEADER_DESC 2
+#define MGMT_CMD_DESC_RESV 4
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ + MGMT_CMD_DESC_RESV)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/* Driver will use 1 Tx ring in INT-x/MSI/SRIOV mode. */
+#define QLCNIC_SINGLE_RING 1
+#define QLCNIC_DEF_SDS_RINGS 4
+#define QLCNIC_DEF_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_VNIC_SDS_RINGS 4
+#define QLCNIC_83XX_MINIMUM_VECTOR 3
+#define QLCNIC_82XX_MINIMUM_VECTOR 2
+
+enum qlcnic_queue_type {
+ QLCNIC_TX_QUEUE = 1,
+ QLCNIC_RX_QUEUE,
+};
+
+/* Operational mode for driver */
+#define QLCNIC_VNIC_MODE 0xFF
+#define QLCNIC_DEFAULT_MODE 0x0
+
+/* Virtual NIC function count */
+#define QLC_DEFAULT_VNIC_COUNT 8
+#define QLC_84XX_VNIC_COUNT 16
+
+/*
+ * Following are the states of the Phantom. Phantom will set them and
+ * Host will read to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_RCV_DESCRIPTORS_VF 2048
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+#define DEFAULT_RCV_DESCRIPTORS_VF 1024
+#define MAX_RDS_RINGS 2
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
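
get_next_index() relies on the ring length being a power of two, so the increment wraps with a mask instead of a modulo. A minimal check, assuming a 256-entry ring:

/* Standalone check of the power-of-two ring wrap (hypothetical harness). */
#include <stdio.h>

#define get_next_index(index, length) \
	(((index) + 1) & ((length) - 1))

int main(void)
{
	unsigned int ring_len = 256; /* must be a power of two for the mask trick */

	/* Expect 43, then 0 (wrap-around at the end of the ring). */
	printf("%u\n", get_next_index(42, ring_len));
	printf("%u\n", get_next_index(255, ring_len));
	return 0;
}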
+
+/*
+ * The following data structures describe the descriptors that will be used.
+ * The tcpHdrSize and ipHdrSize fields were added; the driver needs to fill
+ * them only when doing LSO (packets above the 1500-byte size).
+ */
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 encap_descr; /* 15:10 offset of outer L3 header,
+ * 9:6 number of 32bit words in outer L3 header,
+ * 5 offload outer L4 checksum,
+ * 4 offload outer L3 checksum,
+ * 3 Inner L4 type, TCP=0, UDP=1,
+ * 2 Inner L3 type, IPv4=0, IPv6=1,
+ * 1 Outer L3 type,IPv4=0, IPv6=1,
+ * 0 type of encapsulation, GRE=0, VXLAN=1
+ */
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ u8 outer_hdr_length; /* Encapsulation only */
+ u8 rsvd1;
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ u8 eth_addr[ETH_ALEN];
+ __le16 vlan_TCI; /* In case of encapsulation,
+ * this is for outer VLAN
+ */
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+} __packed;
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/* Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc {
+ __le32 findex;
+ __le32 num_entries;
+ __le32 entry_size;
+ __le32 reserved[5];
+};
+
+struct uni_data_desc {
+ __le32 findex;
+ __le32 size;
+ __le32 reserved[5];
+};
+
+/* Flash Defines and Structures */
+#define QLCNIC_FLT_LOCATION 0x3F1000
+#define QLCNIC_FDT_LOCATION 0x3F0000
+#define QLCNIC_B0_FW_IMAGE_REGION 0x74
+#define QLCNIC_C0_FW_IMAGE_REGION 0x97
+#define QLCNIC_BOOTLD_REGION 0X72
+struct qlcnic_flt_header {
+ u16 version;
+ u16 len;
+ u16 checksum;
+ u16 reserved;
+};
+
+struct qlcnic_flt_entry {
+ u8 region;
+ u8 reserved0;
+ u8 attrib;
+ u8 reserved1;
+ u32 size;
+ u32 start_addr;
+ u32 end_addr;
+};
+
+/* Flash Descriptor Table */
+struct qlcnic_fdt {
+ u32 valid;
+ u16 ver;
+ u16 len;
+ u16 cksum;
+ u16 unused;
+ u8 model[16];
+ u8 mfg_id;
+ u16 id;
+ u8 flag;
+ u8 erase_cmd;
+ u8 alt_erase_cmd;
+ u8 write_enable_cmd;
+ u8 write_enable_bits;
+ u8 write_statusreg_cmd;
+ u8 unprotected_sec_cmd;
+ u8 read_manuf_cmd;
+ u32 block_size;
+ u32 alt_block_size;
+ u32 flash_size;
+ u32 write_enable_data;
+ u8 readid_addr_len;
+ u8 write_disable_bits;
+ u8 read_dev_id_len;
+ u8 chip_erase_cmd;
+ u16 read_timeo;
+ u8 protected_sec_cmd;
+ u8 resvd[65];
+};
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3P_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3P_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3P_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3P_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3P_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3P_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3P_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3P_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3P_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3P_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3P_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3P_10G_TP 0x0080
+
+#define QLCNIC_MSIX_TABLE_OFFSET 0x44
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+extern int qlcnic_use_msi;
+extern int qlcnic_use_msi_x;
+extern int qlcnic_auto_fw_reset;
+extern int qlcnic_load_fw_file;
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} contains the mapping info for each SG list entry. It
+ * has to be freed when DMA is complete. This is part of qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_cmd_buffer per skb packet. These are
+ * used to save the DMA info for pci_unmap_page().
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments as it is a single buffer */
+struct qlcnic_rx_buffer {
+ u16 ref_handle;
+ struct sk_buff *skb;
+ struct list_head list;
+ u64 dma;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * Interrupt coalescing defaults. The defaults are for a 1500-byte MTU and
+ * are adjusted based on the configured MTU.
+ */
+#define QLCNIC_INTR_COAL_TYPE_RX 1
+#define QLCNIC_INTR_COAL_TYPE_TX 2
+#define QLCNIC_INTR_COAL_TYPE_RX_TX 3
+
+#define QLCNIC_DEF_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEF_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_DEF_INTR_COALESCE_TX_TIME_US 64
+#define QLCNIC_DEF_INTR_COALESCE_TX_PACKETS 64
+
+#define QLCNIC_INTR_DEFAULT 0x04
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+#define QLCNIC_DEV_INFO_SIZE 2
+
+struct qlcnic_nic_intr_coalesce {
+ u8 type;
+ u8 sts_ring_mask;
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
+ u16 flag;
+ u32 timer_out;
+};
+
+struct qlcnic_83xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 ocm_wnd_reg[16];
+ u32 rsvd[];
+};
+
+struct qlcnic_82xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[7];
+ u32 capabilities;
+ u32 rsvd1[];
+};
+
+#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
+
+struct qlcnic_fw_dump {
+ u8 clr; /* flag to indicate if dump is cleared */
+ bool enable; /* enable/disable dump */
+ u32 size; /* total size of the dump */
+ u32 cap_mask; /* Current capture mask */
+ void *data; /* dump data area */
+ void *tmpl_hdr;
+ dma_addr_t phys_addr;
+ void *dma_buffer;
+ bool use_pex_dma;
+ /* Read-only elements which are common to the 82xx and 83xx
+ * template headers. Update these values immediately after the
+ * template header is read from firmware.
+ */
+ u32 tmpl_hdr_size;
+ u32 version;
+ u32 num_entries;
+ u32 offset;
+};
+
+/*
+ * One hardware_context{} per adapter;
+ * it contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u8 loopback_state;
+ u8 beacon_state;
+ u8 has_link_events;
+ u8 fw_type;
+ u8 physical_port;
+ u8 reset_context;
+ u8 msix_supported;
+ u8 max_mac_filters;
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 diag_test;
+ u8 num_msix;
+ u8 nic_mode;
+ int diag_cnt;
+
+ u16 max_uc_count;
+ u16 port_type;
+ u16 board_type;
+ u16 supported_type;
+
+ u32 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u16 op_mode;
+ u16 switch_mode;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 max_mtu;
+ u32 msg_enable;
+ u16 total_nic_func;
+ u16 max_pci_func;
+ u32 max_vnic_func;
+ u32 total_pci_func;
+
+ u32 capabilities;
+ u32 extra_capability[3];
+ u32 temp;
+ u32 int_vec_bit;
+ u32 fw_hal_version;
+ u32 port_config;
+ struct qlcnic_hardware_ops *hw_ops;
+ struct qlcnic_nic_intr_coalesce coal;
+ struct qlcnic_fw_dump fw_dump;
+ struct qlcnic_fdt fdt;
+ struct qlc_83xx_reset reset;
+ struct qlc_83xx_idc idc;
+ struct qlc_83xx_fw_info *fw_info;
+ struct qlcnic_intrpt_config *intr_tbl;
+ struct qlcnic_sriov *sriov;
+ u32 *reg_tbl;
+ u32 *ext_reg_tbl;
+ u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
+ u32 mbox_reg[4];
+ struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
+ struct device *hwmon_dev;
+ u32 post_mode;
+ bool run_post;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+ u64 lrobytes;
+ u64 lso_frames;
+ u64 encap_lso_frames;
+ u64 encap_tx_csummed;
+ u64 encap_rx_csummed;
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 skb_alloc_failure;
+ u64 null_rxbuf;
+ u64 rx_dma_map_error;
+ u64 tx_dma_map_error;
+ u64 spurious_intr;
+ u64 mac_filter_limit_overrun;
+ u64 mbx_spurious_intr;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor. There may
+ * be one Rcv Descriptor for normal packets, one for jumbo frames, and
+ * possibly others.
+ */
+struct qlcnic_host_rds_ring {
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ u32 num_desc;
+ u32 producer;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ void __iomem *crb_intr_mask;
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ + 12];
+} ____cacheline_internodealigned_in_smp;
+
+struct qlcnic_tx_queue_stats {
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+ u64 tx_bytes;
+};
+
+struct qlcnic_host_tx_ring {
+ int irq;
+ void __iomem *crb_intr_mask;
+ char name[IFNAMSIZ + 12];
+ u16 ctx_id;
+
+ u32 state;
+ u32 producer;
+ u32 sw_consumer;
+ u32 num_desc;
+
+ struct qlcnic_tx_queue_stats tx_stats;
+
+ void __iomem *crb_cmd_producer;
+ struct cmd_desc_type0 *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ __le32 *hw_consumer;
+
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+ struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
+} ____cacheline_internodealigned_in_smp;
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive must be in this structure. The global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_INVALID_ARGS 6
+#define QLCNIC_RCODE_NOT_SUPPORTED 9
+#define QLCNIC_RCODE_NOT_PERMITTED 10
+#define QLCNIC_RCODE_NOT_IMPL 15
+#define QLCNIC_RCODE_INVALID 16
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+#define QLCNIC_CAP0_VALIDOFF (1 << 11)
+#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
+
+/*
+ * Context state
+ */
+#define QLCNIC_HOST_CTX_STATE_FREED 0
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+} __packed;
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 valid_field_offset;
+ u8 txrx_sds_binding;
+ u8 msix_handler;
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[];
+} __packed;
+
+struct qlcnic_cardrsp_rds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+} __packed;
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrsp_sds_rings */
+ char data[];
+} __packed;
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
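
The two sizing macros above account for the variable-length ring descriptors packed after data[] in the request/response structures. Below is a sketch of the computation for 2 RDS rings and 4 SDS rings; the ring-descriptor sizes mirror the packed structures above, while the base structure size is a placeholder, and the real driver would allocate such a buffer with dma_alloc_coherent() rather than this harness.

/* Standalone sketch of the variable-length Rx context sizing (hypothetical). */
#include <stdio.h>
#include <stdint.h>

/* Local stand-ins matching the packed sizes of the ring request descriptors. */
struct qlcnic_hostrq_rds_ring { uint8_t raw[24]; };  /* 8 + 8 + 4 + 4 bytes */
struct qlcnic_hostrq_sds_ring { uint8_t raw[16]; };  /* 8 + 4 + 2 + 2 bytes */
struct qlcnic_hostrq_rx_ctx   { uint8_t raw[208]; }; /* placeholder base size */

#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
	(sizeof(HOSTRQ_RX) + \
	(rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
	(sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))

int main(void)
{
	/* 2 RDS rings and 4 SDS rings add 2*24 + 4*16 = 112 bytes to the base. */
	size_t len = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, 2, 4);

	printf("request length = %zu bytes\n", len);
	return 0;
}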
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+} __packed;
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+} __packed;
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+} __packed;
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+} __packed;
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P3P 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+#define QLCNIC_MAC_VLAN_ADD 3
+#define QLCNIC_MAC_VLAN_DEL 4
+
+enum qlcnic_mac_type {
+ QLCNIC_UNICAST_MAC,
+ QLCNIC_MULTICAST_MAC,
+ QLCNIC_BROADCAST_MAC,
+};
+
+struct qlcnic_mac_vlan_list {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+ u16 vlan_id;
+ enum qlcnic_mac_type mac_type;
+};
+
+/* MAC Learn */
+#define NO_MAC_LEARN 0
+#define DRV_MAC_LEARN 1
+#define FDB_MAC_LEARN 2
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+#define QLCNIC_ILB_MODE 0x1
+#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
+
+#define QLCNIC_LINKEVENT 0x1
+#define QLCNIC_LB_RESPONSE 0x2
+#define QLCNIC_IS_LB_CONFIGURED(VAL) \
+ (VAL == (QLCNIC_LINKEVENT | QLCNIC_LB_RESPONSE))
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
+#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
+
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 0x13
+
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_TSO BIT_1
+#define QLCNIC_FW_CAPABILITY_BDG BIT_8
+#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
+#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
+#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
+#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
+
+#define QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG BIT_2
+#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
+#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
+#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_9
+#define QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP BIT_13
+
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD BIT_0
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD BIT_1
+#define QLCNIC_83XX_FW_CAPAB_ENCAP_CKO_OFFLOAD BIT_4
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
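+
+/*
+ * Illustrative companion (an assumption, not an existing driver macro):
+ * per the layout above, the error-code field occupies bits 31:16 of the
+ * response header and could be pulled out the same way.
+ */
+#define qlcnic_get_nic_msg_errcode(msg_hdr)	\
+	(((msg_hdr) >> 16) & 0xFFFF)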
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+} __packed;
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+struct qlcnic_vlan_req {
+ __le16 vlan_id;
+ __le16 rsvd[3];
+} __packed;
+
+struct qlcnic_ipaddr {
+ __be32 ipv4;
+ __be32 ipv6[4];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x01
+#define QLCNIC_LRO_DISABLED 0x00
+#define QLCNIC_BRIDGE_ENABLED 0X10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_ESWITCH_ENABLED 0x40
+#define QLCNIC_ADAPTER_INITIALIZED 0x80
+#define QLCNIC_TAGGING_ENABLED 0x100
+#define QLCNIC_MACSPOOF 0x200
+#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
+#define QLCNIC_PROMISC_DISABLED 0x800
+#define QLCNIC_NEED_FLR 0x1000
+#define QLCNIC_FW_RESET_OWNER 0x2000
+#define QLCNIC_FW_HANG 0x4000
+#define QLCNIC_FW_LRO_MSS_CAP 0x8000
+#define QLCNIC_TX_INTR_SHARED 0x10000
+#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+#define QLCNIC_TSS_RSS 0x80000
+
+#define QLCNIC_VLAN_FILTERING 0x800000
+
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+#define QLCNIC_IS_TSO_CAPABLE(adapter) \
+ ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+
+#define QLCNIC_BEACON_EANBLE 0xC
+#define QLCNIC_BEACON_DISABLE 0xD
+
+#define QLCNIC_BEACON_ON 2
+#define QLCNIC_BEACON_OFF 0
+
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+#define QLCNIC_MSIX_TBL_PGSIZE 4096
+
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+#define __QLCNIC_AER 5
+#define __QLCNIC_DIAG_RES_ALLOC 6
+#define __QLCNIC_LED_ENABLE 7
+#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
+#define __QLCNIC_SRIOV_ENABLE 10
+#define __QLCNIC_SRIOV_CAPABLE 11
+#define __QLCNIC_MBX_POLL_ENABLE 12
+#define __QLCNIC_DIAG_MODE 13
+#define __QLCNIC_MAINTENANCE_MODE 16
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+#define QLCNIC_LED_TEST 3
+
+#define QLCNIC_FILTER_AGE 80
+#define QLCNIC_READD_AGE 20
+#define QLCNIC_LB_MAX_FILTERS 64
+#define QLCNIC_LB_BUCKET_SIZE 32
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+
+struct qlcnic_filter {
+ struct hlist_node fnode;
+ u8 faddr[ETH_ALEN];
+ u16 vlan_id;
+ unsigned long ftime;
+};
+
+struct qlcnic_filter_hash {
+ struct hlist_head *fhead;
+ u8 fnum;
+ u16 fmax;
+ u16 fbucket_size;
+};
+
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+ struct workqueue_struct *work_q;
+ struct qlcnic_adapter *adapter;
+ const struct qlcnic_mbx_ops *ops;
+ struct work_struct work;
+ struct completion completion;
+ struct list_head cmd_q;
+ unsigned long status;
+ spinlock_t queue_lock; /* Mailbox queue lock */
+ spinlock_t aen_lock; /* Mailbox response/AEN lock */
+ u32 rsp_status;
+ u32 num_cmds;
+};
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+
+ unsigned long state;
+ u32 flags;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 max_rxd;
+ u16 max_jumbo_rxd;
+
+ u8 max_rds_rings;
+
+ u8 max_sds_rings; /* max sds rings supported by adapter */
+ u8 max_tx_rings; /* max tx rings supported by adapter */
+
+ u8 drv_tx_rings; /* max tx rings supported by driver */
+ u8 drv_sds_rings; /* max sds rings supported by driver */
+
+ u8 drv_tss_rings; /* tss ring input */
+ u8 drv_rss_rings; /* rss ring input */
+
+ u8 rx_csum;
+ u8 portnum;
+
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+ u8 reset_ctx_cnt;
+
+ u16 is_up;
+ u16 rx_pvid;
+ u16 tx_pvid;
+
+ u32 irq;
+ u32 heartbeat;
+
+ u8 dev_state;
+ u8 reset_ack_timeo;
+ u8 dev_init_timeo;
+
+ u8 mac_addr[ETH_ALEN];
+
+ u64 dev_rst_time;
+ bool drv_mac_learn;
+ bool fdb_mac_learn;
+ bool rx_mac_learn;
+ unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u8 flash_mfg_id;
+ struct qlcnic_npar_info *npars;
+ struct qlcnic_eswitch *eswitch;
+ struct qlcnic_nic_template *nic_ops;
+
+ struct qlcnic_adapter_stats stats;
+ struct list_head mac_list;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry *msix_entries;
+ struct workqueue_struct *qlcnic_wq;
+ struct delayed_work fw_work;
+ struct delayed_work idc_aen_work;
+ struct delayed_work mbx_poll_work;
+ struct qlcnic_dcb *dcb;
+
+ struct qlcnic_filter_hash fhash;
+ struct qlcnic_filter_hash rx_fhash;
+ struct list_head vf_mc_list;
+
+ spinlock_t mac_learn_lock;
+ /* spinlock for catching rcv filters for eswitch traffic */
+ spinlock_t rx_mac_learn_lock;
+	u32 file_prd_off;	/* File fw product offset */
+ u32 fw_version;
+ u32 offload_flags;
+ const struct firmware *fw;
+};
+
+struct qlcnic_info_le {
+ __le16 pci_func;
+ __le16 op_mode; /* 1 = Priv, 2 = NP, 3 = NP passthru */
+ __le16 phys_port;
+ __le16 switch_mode; /* 0 = disabled, 1 = int, 2 = ext */
+
+ __le32 capabilities;
+ u8 max_mac_filters;
+ u8 reserved1;
+ __le16 max_mtu;
+
+ __le16 max_tx_ques;
+ __le16 max_rx_ques;
+ __le16 min_tx_bw;
+ __le16 max_tx_bw;
+ __le32 op_type;
+ __le16 max_bw_reg_offset;
+ __le16 max_linkspeed_reg_offset;
+ __le32 capability1;
+ __le32 capability2;
+ __le32 capability3;
+ __le16 max_tx_mac_filters;
+ __le16 max_rx_mcast_mac_filters;
+ __le16 max_rx_ucast_mac_filters;
+ __le16 max_rx_ip_addr;
+ __le16 max_rx_lro_flow;
+ __le16 max_rx_status_rings;
+ __le16 max_rx_buf_rings;
+ __le16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ __le16 max_vports;
+ __le16 linkstate_reg_offset;
+ __le16 bit_offsets;
+ __le16 max_local_ipv6_addrs;
+ __le16 max_remote_ipv6_addrs;
+ u8 reserved2[56];
+} __packed;
+
+struct qlcnic_info {
+ u16 pci_func;
+ u16 op_mode;
+ u16 phys_port;
+ u16 switch_mode;
+ u32 capabilities;
+ u8 max_mac_filters;
+ u16 max_mtu;
+ u16 max_tx_ques;
+ u16 max_rx_ques;
+ u16 min_tx_bw;
+ u16 max_tx_bw;
+ u32 op_type;
+ u16 max_bw_reg_offset;
+ u16 max_linkspeed_reg_offset;
+ u32 capability1;
+ u32 capability2;
+ u32 capability3;
+ u16 max_tx_mac_filters;
+ u16 max_rx_mcast_mac_filters;
+ u16 max_rx_ucast_mac_filters;
+ u16 max_rx_ip_addr;
+ u16 max_rx_lro_flow;
+ u16 max_rx_status_rings;
+ u16 max_rx_buf_rings;
+ u16 max_tx_vlan_keys;
+ u8 total_pf;
+ u8 total_rss_engines;
+ u16 max_vports;
+ u16 linkstate_reg_offset;
+ u16 bit_offsets;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_pci_info_le {
+ __le16 id; /* pci function id */
+ __le16 active; /* 1 = Enabled */
+ __le16 type; /* 1 = NIC, 2 = FCoE, 3 = iSCSI */
+ __le16 default_port; /* default port number */
+
+	__le16 tx_min_bw;	/* Multiple of 100 Mbps */
+ __le16 tx_max_bw;
+ __le16 reserved1[2];
+
+ u8 mac[ETH_ALEN];
+ __le16 func_count;
+ u8 reserved2[104];
+
+} __packed;
+
+struct qlcnic_pci_info {
+ u16 id;
+ u16 active;
+ u16 type;
+ u16 default_port;
+ u16 tx_min_bw;
+ u16 tx_max_bw;
+ u8 mac[ETH_ALEN];
+ u16 func_count;
+};
+
+struct qlcnic_npar_info {
+ bool eswitch_status;
+ u16 pvid;
+ u16 min_bw;
+ u16 max_bw;
+ u8 phy_port;
+ u8 type;
+ u8 active;
+ u8 enable_pm;
+ u8 dest_npar;
+ u8 discard_tagged;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 promisc_mode;
+ u8 offload_flags;
+ u8 pci_func;
+ u8 mac[ETH_ALEN];
+};
+
+struct qlcnic_eswitch {
+ u8 port;
+ u8 active_vports;
+ u8 active_vlans;
+ u8 active_ucast_filters;
+ u8 max_ucast_filters;
+ u8 max_active_vlans;
+
+ u32 flags;
+#define QLCNIC_SWITCH_ENABLE BIT_1
+#define QLCNIC_SWITCH_VLAN_FILTERING BIT_2
+#define QLCNIC_SWITCH_PROMISC_MODE BIT_3
+#define QLCNIC_SWITCH_PORT_MIRRORING BIT_4
+};
+
+
+#define MAX_BW 100 /* % of link speed */
+#define MIN_BW 1 /* % of link speed */
+#define MAX_VLAN_ID 4095
+#define MIN_VLAN_ID 2
+#define DEFAULT_MAC_LEARN 1
+
+#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
+#define IS_VALID_BW(bw) (bw <= MAX_BW)
+
+struct qlcnic_pci_func_cfg {
+ u16 func_type;
+ u16 min_bw;
+ u16 max_bw;
+ u16 port_num;
+ u8 pci_func;
+ u8 func_state;
+ u8 def_mac_addr[ETH_ALEN];
+};
+
+struct qlcnic_npar_func_cfg {
+ u32 fw_capab;
+ u16 port_num;
+ u16 min_bw;
+ u16 max_bw;
+ u16 max_tx_queues;
+ u16 max_rx_queues;
+ u8 pci_func;
+ u8 op_mode;
+};
+
+struct qlcnic_pm_func_cfg {
+ u8 pci_func;
+ u8 action;
+ u8 dest_npar;
+ u8 reserved[5];
+};
+
+struct qlcnic_esw_func_cfg {
+ u16 vlan_id;
+ u8 op_mode;
+ u8 op_type;
+ u8 pci_func;
+ u8 host_vlan_tag;
+ u8 promisc_mode;
+ u8 discard_tagged;
+ u8 mac_override;
+ u8 mac_anti_spoof;
+ u8 offload_flags;
+ u8 reserved[5];
+};
+
+#define QLCNIC_STATS_VERSION 1
+#define QLCNIC_STATS_PORT 1
+#define QLCNIC_STATS_ESWITCH 2
+#define QLCNIC_QUERY_RX_COUNTER 0
+#define QLCNIC_QUERY_TX_COUNTER 1
+#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL
+#define QLCNIC_FILL_STATS(VAL1) \
+ (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
+#define QLCNIC_MAC_STATS 1
+#define QLCNIC_ESW_STATS 2
+
+#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
+do { \
+ if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
+ (VAL1) = (VAL2); \
+ else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
+ ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
+ (VAL1) += (VAL2); \
+} while (0)
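+
+/*
+ * Example (illustrative): fold one set of eSwitch counters into an
+ * aggregate while treating QLCNIC_STATS_NOT_AVAIL as "no data":
+ *	QLCNIC_ADD_ESW_STATS(total->unicast_frames, port->unicast_frames);
+ */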
+
+struct qlcnic_mac_statistics_le {
+ __le64 mac_tx_frames;
+ __le64 mac_tx_bytes;
+ __le64 mac_tx_mcast_pkts;
+ __le64 mac_tx_bcast_pkts;
+ __le64 mac_tx_pause_cnt;
+ __le64 mac_tx_ctrl_pkt;
+ __le64 mac_tx_lt_64b_pkts;
+ __le64 mac_tx_lt_127b_pkts;
+ __le64 mac_tx_lt_255b_pkts;
+ __le64 mac_tx_lt_511b_pkts;
+ __le64 mac_tx_lt_1023b_pkts;
+ __le64 mac_tx_lt_1518b_pkts;
+ __le64 mac_tx_gt_1518b_pkts;
+ __le64 rsvd1[3];
+
+ __le64 mac_rx_frames;
+ __le64 mac_rx_bytes;
+ __le64 mac_rx_mcast_pkts;
+ __le64 mac_rx_bcast_pkts;
+ __le64 mac_rx_pause_cnt;
+ __le64 mac_rx_ctrl_pkt;
+ __le64 mac_rx_lt_64b_pkts;
+ __le64 mac_rx_lt_127b_pkts;
+ __le64 mac_rx_lt_255b_pkts;
+ __le64 mac_rx_lt_511b_pkts;
+ __le64 mac_rx_lt_1023b_pkts;
+ __le64 mac_rx_lt_1518b_pkts;
+ __le64 mac_rx_gt_1518b_pkts;
+ __le64 rsvd2[3];
+
+ __le64 mac_rx_length_error;
+ __le64 mac_rx_length_small;
+ __le64 mac_rx_length_large;
+ __le64 mac_rx_jabber;
+ __le64 mac_rx_dropped;
+ __le64 mac_rx_crc_error;
+ __le64 mac_align_error;
+} __packed;
+
+struct qlcnic_mac_statistics {
+ u64 mac_tx_frames;
+ u64 mac_tx_bytes;
+ u64 mac_tx_mcast_pkts;
+ u64 mac_tx_bcast_pkts;
+ u64 mac_tx_pause_cnt;
+ u64 mac_tx_ctrl_pkt;
+ u64 mac_tx_lt_64b_pkts;
+ u64 mac_tx_lt_127b_pkts;
+ u64 mac_tx_lt_255b_pkts;
+ u64 mac_tx_lt_511b_pkts;
+ u64 mac_tx_lt_1023b_pkts;
+ u64 mac_tx_lt_1518b_pkts;
+ u64 mac_tx_gt_1518b_pkts;
+ u64 rsvd1[3];
+ u64 mac_rx_frames;
+ u64 mac_rx_bytes;
+ u64 mac_rx_mcast_pkts;
+ u64 mac_rx_bcast_pkts;
+ u64 mac_rx_pause_cnt;
+ u64 mac_rx_ctrl_pkt;
+ u64 mac_rx_lt_64b_pkts;
+ u64 mac_rx_lt_127b_pkts;
+ u64 mac_rx_lt_255b_pkts;
+ u64 mac_rx_lt_511b_pkts;
+ u64 mac_rx_lt_1023b_pkts;
+ u64 mac_rx_lt_1518b_pkts;
+ u64 mac_rx_gt_1518b_pkts;
+ u64 rsvd2[3];
+ u64 mac_rx_length_error;
+ u64 mac_rx_length_small;
+ u64 mac_rx_length_large;
+ u64 mac_rx_jabber;
+ u64 mac_rx_dropped;
+ u64 mac_rx_crc_error;
+ u64 mac_align_error;
+};
+
+struct qlcnic_esw_stats_le {
+ __le16 context_id;
+ __le16 version;
+ __le16 size;
+ __le16 unused;
+ __le64 unicast_frames;
+ __le64 multicast_frames;
+ __le64 broadcast_frames;
+ __le64 dropped_frames;
+ __le64 errors;
+ __le64 local_frames;
+ __le64 numbytes;
+ __le64 rsvd[3];
+} __packed;
+
+struct __qlcnic_esw_statistics {
+ u16 context_id;
+ u16 version;
+ u16 size;
+ u16 unused;
+ u64 unicast_frames;
+ u64 multicast_frames;
+ u64 broadcast_frames;
+ u64 dropped_frames;
+ u64 errors;
+ u64 local_frames;
+ u64 numbytes;
+ u64 rsvd[3];
+};
+
+struct qlcnic_esw_statistics {
+ struct __qlcnic_esw_statistics rx;
+ struct __qlcnic_esw_statistics tx;
+};
+
+#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
+#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
+#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
+#define QLCNIC_FORCE_FW_RESET 0xdeaddead
+#define QLCNIC_SET_QUIESCENT 0xadd00010
+#define QLCNIC_RESET_QUIESCENT 0xadd00020
+
+struct _cdrp_cmd {
+ u32 num;
+ u32 *arg;
+};
+
+struct qlcnic_cmd_args {
+ struct completion completion;
+ struct list_head list;
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+ atomic_t rsp_status;
+ int pay_size;
+ u32 rsp_opcode;
+ u32 total_cmds;
+ u32 op_type;
+ u32 type;
+ u32 cmd_op;
+ u32 *hdr; /* Back channel message header */
+ u32 *pay; /* Back channel message payload */
+ u8 func_num;
+};
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
+int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define QLCRD32(adapter, off, err) \
+ (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
+
+#define QLCWR32(adapter, off, val) \
+ adapter->ahw->hw_ops->write_reg(adapter, off, val)
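+
+/*
+ * Usage sketch (illustrative; "off" stands for a CRB register offset):
+ *	int err;
+ *	u32 val = QLCRD32(adapter, off, &err);
+ *	if (!err)
+ *		QLCWR32(adapter, off, val | BIT_0);
+ */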
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
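+
+/*
+ * Typical usage sketch (assuming qlcnic_pcie_sem_lock() returns 0 on
+ * success):
+ *	if (qlcnic_rom_lock(adapter))
+ *		return -EIO;
+ *	... access the flash through the ROM window ...
+ *	qlcnic_rom_unlock(adapter);
+ */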
+
+#define __QLCNIC_MAX_LED_RATE 0xf
+#define __QLCNIC_MAX_LED_STATE 0x2
+
+#define MAX_CTL_CHECK 1000
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
+int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
+
+/* Functions from qlcnic_init.c */
+void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
+int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter);
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+
+int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *);
+int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16,
+ enum qlcnic_mac_type);
+int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
+void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *);
+
+/* Functions from qlcnic_ethtool.c */
+int qlcnic_check_loopback_buff(unsigned char *, u8 []);
+int qlcnic_do_lb_test(struct qlcnic_adapter *, u8);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+void qlcnic_diag_free_res(struct net_device *netdev, int);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *, struct net_device *);
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *, u8);
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *, u8);
+int qlcnic_setup_rings(struct qlcnic_adapter *);
+int qlcnic_validate_rings(struct qlcnic_adapter *, __u32, int);
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
+int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
+void qlcnic_set_drv_version(struct qlcnic_adapter *);
+
+/* eSwitch management functions */
+int qlcnic_config_switch_port(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+
+int qlcnic_get_eswitch_port_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *, u8, u8, u8);
+int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
+ struct __qlcnic_esw_statistics *);
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
+int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
+
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd);
+
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *, int);
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
+void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
+void qlcnic_free_tx_rings(struct qlcnic_adapter *);
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
+int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_set_vlan_config(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *);
+void qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+int qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void __qlcnic_down(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_detach(struct qlcnic_adapter *);
+void qlcnic_teardown_intr(struct qlcnic_adapter *);
+int qlcnic_attach(struct qlcnic_adapter *);
+int __qlcnic_up(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_restore_indev_addr(struct net_device *, unsigned long);
+
+int qlcnic_check_temp(struct qlcnic_adapter *);
+int qlcnic_init_pci_info(struct qlcnic_adapter *);
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_reset_npar_config(struct qlcnic_adapter *);
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
+int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port);
+int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port);
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
+int qlcnic_read_mac_addr(struct qlcnic_adapter *);
+int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_set_netdev_features(struct qlcnic_adapter *,
+ struct qlcnic_esw_func_cfg *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
+ u16 *);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_board_info {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
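+/* Number of free Tx descriptors between the software consumer and the
+ * hardware producer of a circular ring with num_desc entries.
+ */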
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (likely(tx_ring->producer < tx_ring->sw_consumer))
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
+
+struct qlcnic_nic_template {
+ int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
+ int (*config_led) (struct qlcnic_adapter *, u32, u32);
+ int (*start_firmware) (struct qlcnic_adapter *);
+ int (*init_driver) (struct qlcnic_adapter *);
+ void (*request_reset) (struct qlcnic_adapter *, u32);
+ void (*cancel_idc_work) (struct qlcnic_adapter *);
+ int (*napi_add)(struct qlcnic_adapter *, struct net_device *);
+ void (*napi_del)(struct qlcnic_adapter *);
+ void (*config_ipaddr)(struct qlcnic_adapter *, __be32, int);
+ irqreturn_t (*clear_legacy_intr)(struct qlcnic_adapter *);
+ int (*shutdown)(struct pci_dev *);
+ int (*resume)(struct qlcnic_adapter *);
+};
+
+struct qlcnic_mbx_ops {
+ int (*enqueue_cmd) (struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *, unsigned long *);
+ void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+void qlcnic_update_stats(struct qlcnic_adapter *);
+
+/* Adapter hardware abstraction */
+struct qlcnic_hardware_ops {
+ void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
+ int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
+ int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
+ void (*get_ocm_win) (struct qlcnic_hardware_context *);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *);
+ int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+ int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*get_func_no) (struct qlcnic_adapter *);
+ int (*api_lock) (struct qlcnic_adapter *);
+ void (*api_unlock) (struct qlcnic_adapter *);
+ void (*add_sysfs) (struct qlcnic_adapter *);
+ void (*remove_sysfs) (struct qlcnic_adapter *);
+ void (*process_lb_rcv_ring_diag) (struct qlcnic_host_sds_ring *);
+ int (*create_rx_ctx) (struct qlcnic_adapter *);
+ int (*create_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+ void (*del_rx_ctx) (struct qlcnic_adapter *);
+ void (*del_tx_ctx) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ int (*setup_link_event) (struct qlcnic_adapter *, int);
+ int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
+ int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
+ int (*set_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *);
+ int (*change_macvlan) (struct qlcnic_adapter *, u8*, u16, u8);
+ void (*napi_enable) (struct qlcnic_adapter *);
+ void (*napi_disable) (struct qlcnic_adapter *);
+ int (*config_intr_coal) (struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+ int (*config_rss) (struct qlcnic_adapter *, int);
+ int (*config_hw_lro) (struct qlcnic_adapter *, int);
+ int (*config_loopback) (struct qlcnic_adapter *, u8);
+ int (*clear_loopback) (struct qlcnic_adapter *, u8);
+ int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
+ int (*get_board_info) (struct qlcnic_adapter *);
+ void (*set_mac_filter_count) (struct qlcnic_adapter *);
+ void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
+ pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+ pci_channel_state_t);
+ pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+ void (*io_resume) (struct pci_dev *);
+ void (*get_beacon_state)(struct qlcnic_adapter *);
+ void (*enable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*disable_sds_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_sds_ring *);
+ void (*enable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ void (*disable_tx_intr) (struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+ u32 (*get_saved_state)(void *, u32);
+ void (*set_saved_state)(void *, u32, u32);
+ void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+ u32 (*get_cap_size)(void *, int);
+ void (*set_sys_info)(void *, int, u32);
+ void (*store_cap_mask)(void *, u32);
+ bool (*encap_rx_offload) (struct qlcnic_adapter *adapter);
+ bool (*encap_tx_offload) (struct qlcnic_adapter *adapter);
+};
+
+extern struct qlcnic_nic_template qlcnic_vf_ops;
+
+static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD;
+}
+
+static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->extra_capability[0] &
+ QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD;
+}
+
+static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return false;
+}
+
+static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return false;
+}
+
+static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->encap_rx_offload(adapter);
+}
+
+static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->encap_tx_offload(adapter);
+}
+
+static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->start_firmware(adapter);
+}
+
+static inline void qlcnic_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->read_crb(adapter, buf, offset, size);
+}
+
+static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
+}
+
+static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
+ u8 *mac, u8 function)
+{
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
+}
+
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->setup_intr(adapter);
+}
+
+static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 arg)
+{
+ return adapter->ahw->hw_ops->alloc_mbx_args(mbx, adapter, arg);
+}
+
+static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (adapter->ahw->hw_ops->mbx_cmd)
+ return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
+
+ return -EIO;
+}
+
+static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_func_no(adapter);
+}
+
+static inline int qlcnic_api_lock(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->api_lock(adapter);
+}
+
+static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->api_unlock(adapter);
+}
+
+static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->add_sysfs)
+ adapter->ahw->hw_ops->add_sysfs(adapter);
+}
+
+static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->remove_sysfs)
+ adapter->ahw->hw_ops->remove_sysfs(adapter);
+}
+
+static inline void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ sds_ring->adapter->ahw->hw_ops->process_lb_rcv_ring_diag(sds_ring);
+}
+
+static inline int qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->create_rx_ctx(adapter);
+}
+
+static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr,
+ int ring)
+{
+ return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
+}
+
+static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->del_rx_ctx(adapter);
+}
+
+static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *ptr)
+{
+ return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
+}
+
+static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->setup_link_event(adapter, enable);
+}
+
+static inline int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u8 id)
+{
+ return adapter->ahw->hw_ops->get_nic_info(adapter, info, id);
+}
+
+static inline int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *info)
+{
+ return adapter->ahw->hw_ops->get_pci_info(adapter, info);
+}
+
+static inline int qlcnic_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ return adapter->ahw->hw_ops->set_nic_info(adapter, info);
+}
+
+static inline int qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter,
+ u8 *addr, u16 id, u8 cmd)
+{
+ return adapter->ahw->hw_ops->change_macvlan(adapter, addr, id, cmd);
+}
+
+static inline int qlcnic_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ return adapter->nic_ops->napi_add(adapter, netdev);
+}
+
+static inline void qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ adapter->nic_ops->napi_del(adapter);
+}
+
+static inline void qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_enable(adapter);
+}
+
+static inline int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ return adapter->nic_ops->shutdown(pdev);
+}
+
+static inline int __qlcnic_resume(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->resume(adapter);
+}
+
+static inline void qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->napi_disable(adapter);
+}
+
+static inline int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ return adapter->ahw->hw_ops->config_intr_coal(adapter, ethcoal);
+}
+
+static inline int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ return adapter->ahw->hw_ops->config_rss(adapter, enable);
+}
+
+static inline int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter,
+ int enable)
+{
+ return adapter->ahw->hw_ops->config_hw_lro(adapter, enable);
+}
+
+static inline int qlcnic_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->config_loopback(adapter, mode);
+}
+
+static inline int qlcnic_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ return adapter->ahw->hw_ops->clear_loopback(adapter, mode);
+}
+
+static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
+ u32 mode)
+{
+ return adapter->ahw->hw_ops->config_promisc_mode(adapter, mode);
+}
+
+static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
+}
+
+static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->get_board_info(adapter);
+}
+
+static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ return adapter->ahw->hw_ops->free_mac_list(adapter);
+}
+
+static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->set_mac_filter_count)
+ adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+}
+
+static inline void qlcnic_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->hw_ops->get_beacon_state(adapter);
+}
+
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
+}
+
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index)
+{
+ return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index, u32 value)
+{
+ adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_dump *fw_dump)
+{
+ adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int index)
+{
+ return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int idx, u32 value)
+{
+ adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, u32 mask)
+{
+ adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
+static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ if (adapter->nic_ops->request_reset)
+ adapter->nic_ops->request_reset(adapter, key);
+}
+
+static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ if (adapter->nic_ops->cancel_idc_work)
+ adapter->nic_ops->cancel_idc_work(adapter);
+}
+
+static inline irqreturn_t
+qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ return adapter->nic_ops->clear_legacy_intr(adapter);
+}
+
+static inline int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 rate)
+{
+ return adapter->nic_ops->config_led(adapter, state, rate);
+}
+
+static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
+}
+
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void
+qlcnic_82xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_82xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(0, tx_ring->crb_intr_mask);
+}
+
+static inline void
+qlcnic_83xx_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(1, tx_ring->crb_intr_mask);
+}
+
+/* Enable MSI-X and INTx interrupts */
+static inline void
+qlcnic_83xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+/* Disable MSI-X and INTx interrupts */
+static inline void
+qlcnic_83xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(1, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+}
+
+/* When operating in multi Tx mode, the driver must write 0x1 to the
+ * interrupt mask (src) register, instead of 0x0, to disable receive
+ * interrupts.
+ */
+static inline void
+qlcnic_82xx_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->enable_sds_intr)
+ adapter->ahw->hw_ops->enable_sds_intr(adapter, sds_ring);
+}
+
+static inline void
+qlcnic_disable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (adapter->ahw->hw_ops->disable_sds_intr)
+ adapter->ahw->hw_ops->disable_sds_intr(adapter, sds_ring);
+}
+
+static inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->enable_tx_intr)
+ adapter->ahw->hw_ops->enable_tx_intr(adapter, tx_ring);
+}
+
+static inline void qlcnic_disable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (adapter->ahw->hw_ops->disable_tx_intr)
+ adapter->ahw->hw_ops->disable_tx_intr(adapter, tx_ring);
+}
+
+/* When operating in multi Tx mode, the driver must write 0x0 to the
+ * interrupt mask (src) register, instead of 0x1, to enable receive
+ * interrupts.
+ */
+static inline void
+qlcnic_82xx_enable_sds_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static inline int qlcnic_get_diag_lock(struct qlcnic_adapter *adapter)
+{
+ return test_and_set_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline void qlcnic_release_diag_lock(struct qlcnic_adapter *adapter)
+{
+ clear_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+static inline int qlcnic_check_diag_status(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_DIAG_MODE, &adapter->state);
+}
+
+extern const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
+
+#define QLCDB(adapter, lvl, _fmt, _args...) do { \
+ if (NETIF_MSG_##lvl & adapter->ahw->msg_enable) \
+ printk(KERN_INFO "%s: %s: " _fmt, \
+ dev_name(&adapter->pdev->dev), \
+ __func__, ##_args); \
+ } while (0)
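+
+/* Example (illustrative): QLCDB(adapter, DRV, "reset context, attempt %d\n", cnt); */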
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
+#define PCI_DEVICE_ID_QLOGIC_QLE8830 0x8830
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE8C30 0x8C30
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
+
+static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
+}
+
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+}
+
+static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE8830) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
+
+ return status;
+}
+
+static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
+{
+ return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
+}
+
+static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
+
+ return status;
+}
+
+static inline bool qlcnic_83xx_pf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false;
+}
+
+static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE8C30)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+ bool status;
+
+ status = (qlcnic_sriov_pf_check(adapter) ||
+ qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+ return status;
+}
+
+static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_84xx_check(adapter))
+ return QLC_84XX_VNIC_COUNT;
+ else
+ return QLC_DEFAULT_VNIC_COUNT;
+}
+
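+/* Swap each 32-bit word of the buffer in place on big-endian hosts; this
+ * compiles to a no-op on little-endian builds.
+ */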
+static inline void qlcnic_swap32_buffer(u32 *buffer, int count)
+{
+#if defined(__BIG_ENDIAN)
+ u32 *tmp = buffer;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ *tmp = swab32(*tmp);
+ tmp++;
+ }
+#endif
+}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
+#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
new file mode 100644
index 000000000..2fd5c6fdb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -0,0 +1,4241 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/ipv6.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *);
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
+ struct qlcnic_cmd_args *);
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *);
+static irqreturn_t qlcnic_83xx_handle_aen(int, void *);
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+static void qlcnic_83xx_io_resume(struct pci_dev *);
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *, u8);
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
+static int qlcnic_83xx_resume(struct qlcnic_adapter *);
+static int qlcnic_83xx_shutdown(struct pci_dev *);
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+#define QLC_83XX_FW_MBX_CMD 0
+#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
+
+/* 83xx Module type */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LRM 0x1 /* 10GBase-LRM */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_LR 0x2 /* 10GBase-LR */
+#define QLC_83XX_MODULE_FIBRE_10GBASE_SR 0x3 /* 10GBase-SR */
+#define QLC_83XX_MODULE_DA_10GE_PASSIVE_CP 0x4 /* 10GE passive
+ * copper(compliant)
+ */
+#define QLC_83XX_MODULE_DA_10GE_ACTIVE_CP 0x5 /* 10GE active limiting
+ * copper(compliant)
+ */
+#define QLC_83XX_MODULE_DA_10GE_LEGACY_CP 0x6 /* 10GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_SX 0x7 /* 1000Base-SX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_LX 0x8 /* 1000Base-LX */
+#define QLC_83XX_MODULE_FIBRE_1000BASE_CX 0x9 /* 1000Base-CX */
+#define QLC_83XX_MODULE_TP_1000BASE_T 0xa /* 1000Base-T*/
+#define QLC_83XX_MODULE_DA_1GE_PASSIVE_CP 0xb /* 1GE passive copper
+ * (legacy, best effort)
+ */
+#define QLC_83XX_MODULE_UNKNOWN 0xf /* Unknown module type */
+
+/* Port types */
+#define QLC_83XX_10_CAPABLE BIT_8
+#define QLC_83XX_100_CAPABLE BIT_9
+#define QLC_83XX_1G_CAPABLE BIT_10
+#define QLC_83XX_10G_CAPABLE BIT_11
+#define QLC_83XX_AUTONEG_ENABLE BIT_15
+
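+/* Mailbox command metadata: command opcode followed by the number of
+ * request and response mailbox words (descriptive note).
+ */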
+static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
+ {QLCNIC_CMD_CONFIG_INTRPT, 18, 34},
+ {QLCNIC_CMD_CREATE_RX_CTX, 136, 27},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 54, 18},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_LEARNING, 2, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 22, 12},
+ {QLCNIC_CMD_SET_MTU, 3, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 1, 129},
+ {QLCNIC_CMD_GET_NIC_INFO, 2, 19},
+ {QLCNIC_CMD_SET_NIC_INFO, 32, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 5, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 1, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 5, 5},
+ {QLCNIC_CMD_GET_LINK_EVENT, 2, 1},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, 4, 3},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, 6, 1},
+ {QLCNIC_CMD_CONFIGURE_RSS, 14, 1},
+ {QLCNIC_CMD_CONFIGURE_LED, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, 2, 1},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, 2, 1},
+ {QLCNIC_CMD_GET_STATISTICS, 2, 80},
+ {QLCNIC_CMD_SET_PORT_CONFIG, 2, 1},
+ {QLCNIC_CMD_GET_PORT_CONFIG, 2, 2},
+ {QLCNIC_CMD_GET_LINK_STATUS, 2, 4},
+ {QLCNIC_CMD_IDC_ACK, 5, 1},
+ {QLCNIC_CMD_INIT_NIC_FUNC, 3, 1},
+ {QLCNIC_CMD_STOP_NIC_FUNC, 2, 1},
+ {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
+ {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
+ {QLCNIC_CMD_83XX_SET_DRV_VER, 4, 1},
+ {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
+ {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
+ {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 1, 50},
+ {QLCNIC_CMD_SET_INGRESS_ENCAP, 2, 1},
+ {QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP, 4, 1},
+};
+
+const u32 qlcnic_83xx_ext_reg_tbl[] = {
+ 0x38CC, /* Global Reset */
+ 0x38F0, /* Wildcard */
+ 0x38FC, /* Informant */
+ 0x3038, /* Host MBX ctrl */
+ 0x303C, /* FW MBX ctrl */
+ 0x355C, /* BOOT LOADER ADDRESS REG */
+ 0x3560, /* BOOT LOADER SIZE REG */
+ 0x3564, /* FW IMAGE ADDR REG */
+ 0x1000, /* MBX intr enable */
+ 0x1200, /* Default Intr mask */
+ 0x1204, /* Default Interrupt ID */
+ 0x3780, /* QLC_83XX_IDC_MAJ_VERSION */
+ 0x3784, /* QLC_83XX_IDC_DEV_STATE */
+ 0x3788, /* QLC_83XX_IDC_DRV_PRESENCE */
+ 0x378C, /* QLC_83XX_IDC_DRV_ACK */
+ 0x3790, /* QLC_83XX_IDC_CTRL */
+ 0x3794, /* QLC_83XX_IDC_DRV_AUDIT */
+ 0x3798, /* QLC_83XX_IDC_MIN_VERSION */
+ 0x379C, /* QLC_83XX_RECOVER_DRV_LOCK */
+ 0x37A0, /* QLC_83XX_IDC_PF_0 */
+ 0x37A4, /* QLC_83XX_IDC_PF_1 */
+ 0x37A8, /* QLC_83XX_IDC_PF_2 */
+ 0x37AC, /* QLC_83XX_IDC_PF_3 */
+ 0x37B0, /* QLC_83XX_IDC_PF_4 */
+ 0x37B4, /* QLC_83XX_IDC_PF_5 */
+ 0x37B8, /* QLC_83XX_IDC_PF_6 */
+ 0x37BC, /* QLC_83XX_IDC_PF_7 */
+ 0x37C0, /* QLC_83XX_IDC_PF_8 */
+ 0x37C4, /* QLC_83XX_IDC_PF_9 */
+ 0x37C8, /* QLC_83XX_IDC_PF_10 */
+ 0x37CC, /* QLC_83XX_IDC_PF_11 */
+ 0x37D0, /* QLC_83XX_IDC_PF_12 */
+ 0x37D4, /* QLC_83XX_IDC_PF_13 */
+ 0x37D8, /* QLC_83XX_IDC_PF_14 */
+ 0x37DC, /* QLC_83XX_IDC_PF_15 */
+ 0x37E0, /* QLC_83XX_IDC_DEV_PARTITION_INFO_1 */
+ 0x37E4, /* QLC_83XX_IDC_DEV_PARTITION_INFO_2 */
+ 0x37F0, /* QLC_83XX_DRV_OP_MODE */
+ 0x37F4, /* QLC_83XX_VNIC_STATE */
+ 0x3868, /* QLC_83XX_DRV_LOCK */
+ 0x386C, /* QLC_83XX_DRV_UNLOCK */
+ 0x3504, /* QLC_83XX_DRV_LOCK_ID */
+ 0x34A4, /* QLC_83XX_ASIC_TEMP */
+};
+
+const u32 qlcnic_83xx_reg_tbl[] = {
+ 0x34A8, /* PEG_HALT_STAT1 */
+ 0x34AC, /* PEG_HALT_STAT2 */
+ 0x34B0, /* FW_HEARTBEAT */
+ 0x3500, /* FLASH LOCK_ID */
+ 0x3528, /* FW_CAPABILITIES */
+ 0x3538, /* Driver active, DRV_REG0 */
+ 0x3540, /* Device state, DRV_REG1 */
+ 0x3544, /* Driver state, DRV_REG2 */
+ 0x3548, /* Driver scratch, DRV_REG3 */
+ 0x354C, /* Device partition info, DRV_REG4 */
+ 0x3524, /* Driver IDC ver, DRV_REG5 */
+ 0x3550, /* FW_VER_MAJOR */
+ 0x3554, /* FW_VER_MINOR */
+ 0x3558, /* FW_VER_SUB */
+ 0x359C, /* NPAR STATE */
+ 0x35FC, /* FW_IMG_VALID */
+ 0x3650, /* CMD_PEG_STATE */
+ 0x373C, /* RCV_PEG_STATE */
+ 0x37B4, /* ASIC TEMP */
+ 0x356C, /* FW API */
+ 0x3570, /* DRV OP MODE */
+ 0x3850, /* FLASH LOCK */
+ 0x3854, /* FLASH UNLOCK */
+};
+
+static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .add_sysfs = qlcnic_83xx_add_sysfs,
+ .remove_sysfs = qlcnic_83xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
+ .io_error_detected = qlcnic_83xx_io_error_detected,
+ .io_slot_reset = qlcnic_83xx_io_slot_reset,
+ .io_resume = qlcnic_83xx_io_resume,
+ .get_beacon_state = qlcnic_83xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_83xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_83xx_disable_tx_intr,
+ .get_saved_state = qlcnic_83xx_get_saved_state,
+ .set_saved_state = qlcnic_83xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_83xx_get_cap_size,
+ .set_sys_info = qlcnic_83xx_set_sys_info,
+ .store_cap_mask = qlcnic_83xx_store_cap_mask,
+ .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
+ .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
+};
+
+static struct qlcnic_nic_template qlcnic_83xx_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .request_reset = qlcnic_83xx_idc_request_reset,
+ .cancel_idc_work = qlcnic_83xx_idc_exit,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+ .shutdown = qlcnic_83xx_shutdown,
+ .resume = qlcnic_83xx_resume,
+};
+
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_83xx_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ struct pci_dev *pdev = adapter->pdev;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ dev_info(&pdev->dev, "Driver v%s, firmware version %d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ return adapter->fw_version;
+}
+
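+/* Program the per-function CRB window register and read it back to confirm
+ * the window actually moved before any indirect register access.
+ */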
+static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
+{
+ void __iomem *base;
+ u32 val;
+
+ base = adapter->ahw->pci_base0 +
+ QLC_83XX_CRB_WIN_FUNC(adapter->ahw->pci_func);
+ writel(addr, base);
+ val = readl(base);
+ if (val != addr)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ int *err)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ *err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!*err) {
+ return QLCRDX(ahw, QLCNIC_WILDCARD);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%lx\n", __func__, addr);
+ return -EIO;
+ }
+}
+
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ u32 data)
+{
+ int err;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!err) {
+ QLCWRX(ahw, QLCNIC_WILDCARD, data);
+ return 0;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s failed, addr = 0x%x data = 0x%x\n",
+ __func__, (int)addr, data);
+ return err;
+ }
+}
+
+static void qlcnic_83xx_enable_legacy(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* MSI-X enablement failed, use legacy interrupt */
+ adapter->tgt_status_reg = ahw->pci_base0 + QLC_83XX_INTX_PTR;
+ adapter->tgt_mask_reg = ahw->pci_base0 + QLC_83XX_INTX_MASK;
+ adapter->isr_int_vec = ahw->pci_base0 + QLC_83XX_INTX_TRGR;
+ adapter->msix_entries[0].vector = adapter->pdev->irq;
+ dev_info(&adapter->pdev->dev, "using legacy interrupt\n");
+}
+
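+/* One MSI-X vector is needed per SDS ring, one for the AEN (mailbox)
+ * interrupt, and one per Tx ring when Tx interrupts are not shared
+ * with the SDS rings.
+ */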
+static int qlcnic_83xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+	/* account for the AEN (mailbox) MSI-X interrupt */
+ num_msix += 1;
+
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ num_msix += adapter->drv_tx_rings;
+
+ return num_msix;
+}
+
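+/* Enable MSI-X (falling back to legacy INTx if that fails) and build
+ * the interrupt mapping table used when the Tx/Rx contexts are created.
+ */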
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i, num_msix;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = ahw->num_msix;
+ } else {
+ num_msix = qlcnic_83xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ num_msix = ahw->num_msix;
+ } else {
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+ num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ }
+ }
+
+ /* setup interrupt mapping table for fw */
+ ahw->intr_tbl =
+ vzalloc(array_size(num_msix,
+ sizeof(struct qlcnic_intrpt_config)));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
+ qlcnic_83xx_enable_legacy(adapter);
+ }
+
+ for (i = 0; i < num_msix; i++) {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ else
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_INTX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_clear_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ writel(0, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_set_legacy_intr_mask(struct qlcnic_adapter *adapter)
+{
+ if (adapter->tgt_mask_reg)
+ writel(1, adapter->tgt_mask_reg);
+}
+
+static inline void qlcnic_83xx_enable_legacy_msix_mbx_intr(struct qlcnic_adapter
+ *adapter)
+{
+ u32 mask;
+
+	/* The mailbox interrupt (in MSI-X mode) and the legacy interrupt
+	 * share the same source register. We may get here before the
+	 * contexts are created and sds_ring->crb_intr_mask is initialized,
+	 * so calculate the BAR offset of the interrupt source register
+	 * directly.
+	 */
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+}
+
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 mask;
+
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(1, adapter->ahw->pci_base0 + mask);
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, 0);
+}
+
+static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
+ for (i = 0; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
+}
+
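+/* Acknowledge a legacy INTx interrupt: check that it was raised for
+ * this PCI function and clear the interrupt trigger control register.
+ */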
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 intr_val;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int retries = 0;
+
+ intr_val = readl(adapter->tgt_status_reg);
+
+ if (!QLC_83XX_VALID_INTX_BIT31(intr_val))
+ return IRQ_NONE;
+
+ if (QLC_83XX_INTX_FUNC(intr_val) != adapter->ahw->pci_func) {
+ adapter->stats.spurious_intr++;
+ return IRQ_NONE;
+ }
+	/* A write barrier is required here because the interrupt trigger
+	 * control register is cleared with writel_relaxed() below.
+	 */
+ wmb();
+
+ /* clear the interrupt trigger control register */
+ writel_relaxed(0, adapter->isr_int_vec);
+ intr_val = readl(adapter->isr_int_vec);
+ do {
+ intr_val = readl(adapter->tgt_status_reg);
+ if (QLC_83XX_INTX_FUNC(intr_val) != ahw->pci_func)
+ break;
+ retries++;
+ } while (QLC_83XX_VALID_INTX_BIT30(intr_val) &&
+ (retries < QLC_83XX_LEGACY_INTX_MAX_RETRY));
+
+ return IRQ_HANDLED;
+}
+
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ complete(&mbx->completion);
+}
+
+static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
+out:
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+irqreturn_t qlcnic_83xx_intr(int irq, void *data)
+{
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ qlcnic_83xx_poll_process_aen(adapter);
+
+ if (ahw->diag_test) {
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+ ahw->diag_cnt++;
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ return IRQ_HANDLED;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+ } else {
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ napi_schedule(&sds_ring->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t qlcnic_83xx_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+
+ if (adapter->nic_ops->clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+
+ return IRQ_HANDLED;
+}
+
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ u32 num_msix;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_set_legacy_intr_mask(adapter);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ num_msix = adapter->ahw->num_msix - 1;
+ else
+ num_msix = 0;
+
+ msleep(20);
+
+ if (adapter->msix_entries) {
+ synchronize_irq(adapter->msix_entries[num_msix].vector);
+ free_irq(adapter->msix_entries[num_msix].vector, adapter);
+ }
+}
+
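+/* Register the mailbox interrupt handler on the last MSI-X vector in
+ * MSI-X mode, or on the legacy interrupt otherwise.
+ */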
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ u32 val;
+ int err = 0;
+ unsigned long flags = 0;
+
+ if (!(adapter->flags & QLCNIC_MSI_ENABLED) &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ flags |= IRQF_SHARED;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ handler = qlcnic_83xx_handle_aen;
+ val = adapter->msix_entries[adapter->ahw->num_msix - 1].vector;
+ err = request_irq(val, handler, flags, "qlcnic-MB", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register MBX interrupt\n");
+ return err;
+ }
+ } else {
+ handler = qlcnic_83xx_intr;
+ val = adapter->msix_entries[0].vector;
+ err = request_irq(val, handler, flags, "qlcnic", adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to register INTx interrupt\n");
+ return err;
+ }
+ qlcnic_83xx_clear_legacy_intr_mask(adapter);
+ }
+
+ /* Enable mailbox interrupt */
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ return err;
+}
+
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
+ adapter->ahw->pci_func = (val >> 24) & 0xff;
+}
+
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ u32 val, limit = 0;
+
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_LOCK_FUNC(ahw->pci_func);
+ do {
+ val = readl(addr);
+ if (val) {
+ /* write the function number to register */
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ ahw->pci_func);
+ return 0;
+ }
+ usleep_range(1000, 2000);
+ } while (++limit <= QLCNIC_PCIE_SEM_TIMEOUT);
+
+ return -EIO;
+}
+
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
+{
+ void __iomem *addr;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
+ readl(addr);
+}
+
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int ret = 0;
+ u32 data;
+
+ if (qlcnic_api_lock(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to acquire lock. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+
+ data = QLCRD32(adapter, (u32) offset, &ret);
+ qlcnic_api_unlock(adapter);
+
+ if (ret == -EIO) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed. addr offset 0x%x\n",
+ __func__, (u32)offset);
+ return;
+ }
+ memcpy(buf, &data, size);
+}
+
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+
+ memcpy(&data, buf, size);
+ qlcnic_83xx_wrt_reg_indirect(adapter, (u32) offset, data);
+}
+
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get Port Info failed\n");
+ } else {
+ if (ahw->port_config & QLC_83XX_10G_CAPABLE) {
+ ahw->port_type = QLCNIC_XGBE;
+ } else if (ahw->port_config & QLC_83XX_10_CAPABLE ||
+ ahw->port_config & QLC_83XX_100_CAPABLE ||
+ ahw->port_config & QLC_83XX_1G_CAPABLE) {
+ ahw->port_type = QLCNIC_GBE;
+ } else {
+ ahw->port_type = QLCNIC_XGBE;
+ }
+
+ if (QLC_83XX_AUTONEG(ahw->port_config))
+ ahw->link_autoneg = AUTONEG_ENABLE;
+ }
+ return status;
+}
+
+static void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 act_pci_fn = ahw->total_nic_func;
+ u16 count;
+
+ ahw->max_mc_count = QLC_83XX_MAX_MC_COUNT;
+ if (act_pci_fn <= 2)
+ count = (QLC_83XX_MAX_UC_COUNT - QLC_83XX_MAX_MC_COUNT) /
+ act_pci_fn;
+ else
+ count = (QLC_83XX_LB_MAX_FILTERS - QLC_83XX_MAX_MC_COUNT) /
+ act_pci_fn;
+ ahw->max_uc_count = count;
+}
+
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ val = BIT_2 | ((adapter->ahw->num_msix - 1) << 8);
+ else
+ val = BIT_2;
+
+ QLCWRX(adapter->ahw, QLCNIC_MBX_INTR_ENBL, val);
+ qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
+}
+
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->fw_hal_version = 2;
+ qlcnic_get_func_no(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ qlcnic_sriov_vf_set_ops(adapter);
+ return;
+ }
+
+ /* Determine function privilege level */
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ if (pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV))
+ set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
+ adapter->nic_ops = &qlcnic_83xx_ops;
+ }
+}
+
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[]);
+
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
+ dev_info(&adapter->pdev->dev,
+ "Host MBX regs(%d)\n", cmd->req.num);
+ for (i = 0; i < cmd->req.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->req.arg[i]);
+ }
+ pr_info("\n");
+ dev_info(&adapter->pdev->dev,
+ "FW MBX regs(%d)\n", cmd->rsp.num);
+ for (i = 0; i < cmd->rsp.num; i++) {
+ if (i && !(i % 8))
+ pr_info("\n");
+ pr_info("%08x ", cmd->rsp.arg[i]);
+ }
+ pr_info("\n");
+}
+
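+/* Busy-wait for a QLC_83XX_MBX_CMD_BUSY_WAIT command to complete; on
+ * timeout, flush the mailbox workqueue.
+ */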
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
+
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
+
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+ return;
+}
+
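+/* Queue a mailbox command and, depending on the command type, sleep on
+ * its completion, busy-poll for the response, or return immediately
+ * (QLC_83XX_MBX_CMD_NO_WAIT).
+ */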
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
+
+ if (!mbx)
+ return -EIO;
+
+ opcode = LSW(cmd->req.arg[0]);
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
+ }
+
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
+ }
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ }
+
+ return cmd->rsp_opcode;
+}
+
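+/* Look up the command opcode in the 83xx mailbox command table and
+ * allocate request and response argument arrays of the matching sizes.
+ */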
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ u32 temp;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
+ mbx_tbl = qlcnic_83xx_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_83XX_FW_MBX_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ temp = adapter->ahw->fw_hal_version << 29;
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
+ return 0;
+ }
+ }
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
+ return -EINVAL;
+}
+
+void qlcnic_83xx_idc_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, idc_aen_work.work);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_IDC_ACK);
+ if (err)
+ return;
+
+ for (i = 1; i < QLC_83XX_MBX_AEN_CNT; i++)
+ cmd.req.arg[i] = adapter->ahw->mbox_aen[i];
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "%s: Mailbox IDC ACK failed.\n", __func__);
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ dev_dbg(&adapter->pdev->dev, "Completion AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(data[0]));
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &adapter->ahw->idc.status);
+ return;
+}
+
+static void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 event[QLC_83XX_MBX_AEN_CNT];
+ int i;
+
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ event[i] = readl(QLCNIC_MBX_FW(ahw, i));
+
+ switch (QLCNIC_MBX_RSP(event[0])) {
+ case QLCNIC_MBX_LINK_EVENT:
+ qlcnic_83xx_handle_link_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_COMP_EVENT:
+ qlcnic_83xx_handle_idc_comp_aen(adapter, event);
+ break;
+ case QLCNIC_MBX_REQUEST_EVENT:
+ for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
+ adapter->ahw->mbox_aen[i] = QLCNIC_MBX_RSP(event[i]);
+ queue_delayed_work(adapter->qlcnic_wq,
+ &adapter->idc_aen_work, 0);
+ break;
+ case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
+ break;
+ case QLCNIC_MBX_BC_EVENT:
+ qlcnic_sriov_handle_bc_event(adapter, event[1]);
+ break;
+ case QLCNIC_MBX_SFP_INSERT_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_SFP_REMOVE_EVENT:
+ dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&event[1]);
+ break;
+ default:
+ dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
+ QLCNIC_MBX_RSP(event[0]));
+ break;
+ }
+
+ QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
+{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
+ if (resp & QLCNIC_SET_OWNER) {
+ event = readl(QLCNIC_MBX_FW(ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
+ }
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+}
+
+static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+
+ adapter = container_of(work, struct qlcnic_adapter, mbx_poll_work.work);
+
+ if (!test_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_83xx_process_aen(adapter);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work,
+ (HZ / 10));
+}
+
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (test_and_set_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
+}
+
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
+{
+ if (!test_and_clear_bit(__QLCNIC_MBX_POLL_ENABLE, &adapter->state))
+ return;
+ cancel_delayed_work_sync(&adapter->mbx_poll_work);
+}
+
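+/* Add the SDS rings that did not fit into the CREATE_RX_CTX request to
+ * an already created Rx context using the ADD_RCV_RINGS command.
+ */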
+static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
+{
+ int index, i, err, sds_mbx_size;
+ u32 *buf, intrpt_id, intr_mask;
+ u16 context_id;
+ u8 num_sds;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_add_rings_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ context_id = recv_ctx->context_id;
+ num_sds = adapter->drv_sds_rings - QLCNIC_MAX_SDS_RINGS;
+ err = ahw->hw_ops->alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_ADD_RCV_RINGS);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to alloc mbx args %d\n", err);
+ return err;
+ }
+
+ cmd.req.arg[1] = 0 | (num_sds << 8) | (context_id << 16);
+
+ /* set up status rings, mbx 2-81 */
+ index = 2;
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+ sds_mbx.sds_ring_size = sds->num_desc;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to add rings %d\n", err);
+ goto out;
+ }
+
+ mbx_out = (struct qlcnic_add_rings_mbx_out *)&cmd.rsp.arg[1];
+ index = 0;
+ /* status descriptor ring */
+ for (i = 8; i < adapter->drv_sds_rings; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[index];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ index++;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = recv_ctx->context_id | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
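+/* Create the Rx context in firmware: program the SDS status rings and
+ * the regular/jumbo RDS rings, then record the producer, consumer and
+ * interrupt-mask register offsets returned by the firmware.
+ */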
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int i, err, index, sds_mbx_size, rds_mbx_size;
+ u8 num_sds, num_rds;
+ u32 *buf, intrpt_id, intr_mask, cap = 0;
+ struct qlcnic_host_sds_ring *sds;
+ struct qlcnic_host_rds_ring *rds;
+ struct qlcnic_sds_mbx sds_mbx;
+ struct qlcnic_rds_mbx rds_mbx;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ num_rds = adapter->max_rds_rings;
+
+ if (adapter->drv_sds_rings <= QLCNIC_MAX_SDS_RINGS)
+ num_sds = adapter->drv_sds_rings;
+ else
+ num_sds = QLCNIC_MAX_SDS_RINGS;
+
+ sds_mbx_size = sizeof(struct qlcnic_sds_mbx);
+ rds_mbx_size = sizeof(struct qlcnic_rds_mbx);
+ cap = QLCNIC_CAP0_LEGACY_CONTEXT;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP)
+ cap |= QLC_83XX_FW_CAP_LRO_MSS;
+
+ /* set mailbox hdr and capabilities */
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CREATE_RX_CTX);
+ if (err)
+ return err;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ cmd.req.arg[1] = cap;
+ cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
+ (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
+ &cmd.req.arg[6]);
+ /* set up status rings, mbx 8-57/87 */
+ index = QLC_83XX_HOST_SDS_MBX_IDX;
+ for (i = 0; i < num_sds; i++) {
+ memset(&sds_mbx, 0, sds_mbx_size);
+ sds = &recv_ctx->sds_rings[i];
+ sds->consumer = 0;
+ memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
+ sds_mbx.phy_addr_low = LSD(sds->phys_addr);
+ sds_mbx.phy_addr_high = MSD(sds->phys_addr);
+ sds_mbx.sds_ring_size = sds->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[i].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ sds_mbx.intrpt_id = intrpt_id;
+ else
+ sds_mbx.intrpt_id = 0xffff;
+ sds_mbx.intrpt_val = 0;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &sds_mbx, sds_mbx_size);
+ index += sds_mbx_size / sizeof(u32);
+ }
+ /* set up receive rings, mbx 88-111/135 */
+ index = QLCNIC_HOST_RDS_MBX_IDX;
+ rds = &recv_ctx->rds_rings[0];
+ rds->producer = 0;
+ memset(&rds_mbx, 0, rds_mbx_size);
+ rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
+ rds_mbx.reg_ring_sz = rds->dma_size;
+ rds_mbx.reg_ring_len = rds->num_desc;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->producer = 0;
+ rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
+ rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
+ rds_mbx.jmb_ring_sz = rds->dma_size;
+ rds_mbx.jmb_ring_len = rds->num_desc;
+ buf = &cmd.req.arg[index];
+ memcpy(buf, &rds_mbx, rds_mbx_size);
+
+ /* send the mailbox command */
+ err = ahw->hw_ops->mbx_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create Rx ctx in firmware%d\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd.rsp.arg[1];
+ recv_ctx->context_id = mbx_out->ctx_id;
+ recv_ctx->state = mbx_out->state;
+ recv_ctx->virt_port = mbx_out->vport_id;
+ dev_info(&adapter->pdev->dev, "Rx Context[%d] Created, state:0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ /* Receive descriptor ring */
+ /* Standard ring */
+ rds = &recv_ctx->rds_rings[0];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].reg_buf;
+ /* Jumbo ring */
+ rds = &recv_ctx->rds_rings[1];
+ rds->crb_rcv_producer = ahw->pci_base0 +
+ mbx_out->host_prod[0].jmb_buf;
+ /* status descriptor ring */
+ for (i = 0; i < num_sds; i++) {
+ sds = &recv_ctx->sds_rings[i];
+ sds->crb_sts_consumer = ahw->pci_base0 +
+ mbx_out->host_csmr[i];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intr_mask = ahw->intr_tbl[i].src;
+ else
+ intr_mask = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ if (adapter->drv_sds_rings > QLCNIC_MAX_SDS_RINGS)
+ err = qlcnic_83xx_add_rings(adapter);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 temp = 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = tx_ring->ctx_id | temp;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
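+/* Create a Tx context in firmware for the given ring and record the
+ * producer and interrupt-mask register offsets returned in the mailbox
+ * response.
+ */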
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx, int ring)
+{
+ int err;
+ u16 msix_id;
+ u32 *buf, intr_mask, temp = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_tx_mbx mbx;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 msix_vector;
+
+ /* Reset host resources */
+ tx->producer = 0;
+ tx->sw_consumer = 0;
+ *(tx->hw_consumer) = 0;
+
+ memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
+
+	/* set up mailbox inbox registers */
+ mbx.phys_addr_low = LSD(tx->phys_addr);
+ mbx.phys_addr_high = MSD(tx->phys_addr);
+ mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
+ mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
+ mbx.size = tx->num_desc;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ msix_vector = adapter->drv_sds_rings + ring;
+ else
+ msix_vector = adapter->drv_sds_rings - 1;
+ msix_id = ahw->intr_tbl[msix_vector].id;
+ } else {
+ msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+ }
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ mbx.intr_id = msix_id;
+ else
+ mbx.intr_id = 0xffff;
+ mbx.src = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ if (err)
+ return err;
+
+ if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[0] |= (0x3 << 29);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
+
+ cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
+ cmd.req.arg[5] = QLCNIC_SINGLE_RING | temp;
+
+ buf = &cmd.req.arg[6];
+ memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
+	/* send the mailbox command */
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to create Tx ctx in firmware 0x%x\n", err);
+ goto out;
+ }
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
+ tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
+ tx->ctx_id = mbx_out->ctx_id;
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ intr_mask = ahw->intr_tbl[adapter->drv_sds_rings + ring].src;
+ tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+ netdev_info(adapter->netdev,
+ "Tx Context[0x%x] Created, state:0x%x\n",
+ tx->ctx_id, mbx_out->state);
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
+ u8 num_sds_ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ u16 adapter_state = adapter->is_up;
+ u8 ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
+ adapter->drv_sds_rings = num_sds_ring;
+ qlcnic_attach(adapter);
+ }
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ adapter->ahw->hw_ops->setup_link_event(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
+ u8 drv_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+ qlcnic_detach(adapter);
+
+ adapter->ahw->diag_test = 0;
+ adapter->drv_sds_rings = drv_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+out:
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LED_CONFIG);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ beacon_state = cmd.rsp.arg[4];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLC_83XX_BEACON_OFF;
+ else if (beacon_state == QLC_83XX_ENABLE_BEACON)
+ ahw->beacon_state = QLC_83XX_BEACON_ON;
+ }
+ } else {
+ netdev_err(adapter->netdev, "Get beacon state failed, err=%d\n",
+ err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return;
+}
+
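+/* Turn LED blinking on or off.  When enabling, the current LED
+ * configuration is saved in ahw->mbox_reg[] so it can be restored when
+ * blinking is turned off.
+ */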
+int qlcnic_83xx_config_led(struct qlcnic_adapter *adapter, u32 state,
+ u32 beacon)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_in;
+ int i, status = 0;
+
+ if (state) {
+ /* Get LED configuration */
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_CONFIG);
+ if (status)
+ return status;
+
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Get led config failed.\n");
+ goto mbx_err;
+ } else {
+ for (i = 0; i < 4; i++)
+ adapter->ahw->mbox_reg[i] = cmd.rsp.arg[i+1];
+ }
+ qlcnic_free_mbx_args(&cmd);
+ /* Set LED Configuration */
+ mbx_in = (LSW(QLC_83XX_LED_CONFIG) << 16) |
+ LSW(QLC_83XX_LED_CONFIG);
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ if (status)
+ return status;
+
+ cmd.req.arg[1] = mbx_in;
+ cmd.req.arg[2] = mbx_in;
+ cmd.req.arg[3] = mbx_in;
+ if (beacon)
+ cmd.req.arg[4] = QLC_83XX_ENABLE_BEACON;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "Set led config failed.\n");
+ }
+mbx_err:
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+
+ } else {
+ /* Restoring default LED configuration */
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_LED_CONFIG);
+ if (status)
+ return status;
+
+ cmd.req.arg[1] = adapter->ahw->mbox_reg[0];
+ cmd.req.arg[2] = adapter->ahw->mbox_reg[1];
+ cmd.req.arg[3] = adapter->ahw->mbox_reg[2];
+ if (beacon)
+ cmd.req.arg[4] = adapter->ahw->mbox_reg[3];
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Restoring led config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return status;
+ }
+}
+
+int qlcnic_83xx_set_led(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EIO, active = 1;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "LED test is not supported in non-privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to set LED blink state\n");
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ err = qlcnic_83xx_config_led(adapter, active, 0);
+ if (err)
+ netdev_err(netdev, "Failed to reset LED blink state\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int status;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
+ if (enable)
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ else
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_STOP_NIC_FUNC);
+
+ if (status)
+ return;
+
+ cmd.req.arg[1] = QLC_REGISTER_LB_IDC | QLC_INIT_FW_RESOURCES;
+
+ if (adapter->dcb)
+ cmd.req.arg[1] |= QLC_REGISTER_DCB_AEN;
+
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s in NIC IDC function event.\n",
+ (enable ? "register" : "unregister"));
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_83xx_set_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_PORT_CONFIG);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = adapter->ahw->port_config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Set Port Config failed.\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_get_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PORT_CONFIG);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "Get Port config failed\n");
+ else
+ adapter->ahw->port_config = cmd.rsp.arg[1];
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
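+/* Enable or disable asynchronous link event notifications from the
+ * firmware for this receive context.
+ */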
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 temp;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_EVENT);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id << 16;
+ cmd.req.arg[1] = (enable ? 1 : 0) | BIT_8 | temp;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev,
+ "Setup linkevent mailbox failed\n");
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
+ adapter->rx_mac_learn = true;
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_cmd_args *cmd = NULL;
+ u32 temp = 0;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
+ if (err)
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+ qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
+
+ if (qlcnic_84xx_check(adapter) && qlcnic_sriov_pf_check(adapter))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+
+ cmd->req.arg[1] = mode | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
+ return err;
+}
+
+int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u8 drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ int ret = 0, loop = 0;
+
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(netdev,
+ "Loopback test not supported in non privileged mode\n");
+ return -ENOTSUPP;
+ }
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
+ if (qlcnic_get_diag_lock(adapter)) {
+ netdev_info(netdev, "Device is in diagnostics mode\n");
+ return -EBUSY;
+ }
+
+ netdev_info(netdev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
+ drv_sds_rings);
+ if (ret)
+ goto fail_diag_alloc;
+
+ ret = qlcnic_83xx_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_diag_res;
+
+ /* Poll for link up event before running traffic */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev,
+ "Device is resetting, free LB test resources\n");
+ ret = -EBUSY;
+ goto free_diag_res;
+ }
+ if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
+ netdev_info(netdev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -ETIMEDOUT;
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ goto free_diag_res;
+ }
+ } while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+
+free_diag_res:
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_alloc:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ qlcnic_release_diag_lock(adapter);
+ return ret;
+}
+
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Received loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
+
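+/* Put the port into internal (HSS) or external loopback mode, wait for
+ * the IDC completion AEN, then add the adapter MAC filter used by the
+ * loopback test.
+ */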
+static int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
+ int status = 0, loop = 0;
+
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status)
+ return status;
+
+ config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config |= QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_err(netdev,
+ "Failed to Set Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev,
+ "Device is resetting, free LB test resources\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EBUSY;
+ }
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ qlcnic_83xx_clear_lb_mode(adapter, mode);
+ return -ETIMEDOUT;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_ADD);
+ return status;
+}
+
+static int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
+ struct net_device *netdev = adapter->netdev;
+ int status = 0, loop = 0;
+
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
+ set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ if (mode == QLCNIC_ILB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
+ if (mode == QLCNIC_ELB_MODE)
+ ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_EXT;
+
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_err(netdev,
+ "Failed to Clear Loopback Mode = 0x%x.\n",
+ ahw->port_config);
+ ahw->port_config = config;
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return status;
+ }
+
+ /* Wait for Link and IDC Completion AEN */
+ do {
+ msleep(QLC_83XX_LB_MSLEEP_COUNT);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev,
+ "Device is resetting, free LB test resources\n");
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -EBUSY;
+ }
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
+ clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
+ return -ETIMEDOUT;
+ }
+ } while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
+
+ qlcnic_sre_macaddr_change(adapter, adapter->mac_addr, 0,
+ QLCNIC_MAC_DEL);
+ return status;
+}
+
+static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
+ int mode)
+{
+ int err;
+ u32 temp = 0, temp_ip;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_IP_ADDR);
+ if (err)
+ return;
+
+ qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
+
+ if (mode == QLCNIC_IP_UP)
+ cmd.req.arg[1] = 1 | temp;
+ else
+ cmd.req.arg[1] = 2 | temp;
+
+	/*
+	 * The adapter expects the IP address in network byte order, but the
+	 * mailbox registers are written through writel(), which byte-swaps
+	 * the value on big-endian architectures. Use swab32() to cancel out
+	 * that swap.
+	 */
+
+ temp_ip = swab32(ntohl(ip));
+ memcpy(&cmd.req.arg[2], &temp_ip, sizeof(u32));
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x request\n",
+ (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *adapter, int mode)
+{
+ int err;
+ u32 temp, arg1;
+ struct qlcnic_cmd_args cmd;
+ int lro_bit_mask;
+
+ lro_bit_mask = (mode ? (BIT_0 | BIT_1 | BIT_2 | BIT_3) : 0);
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return 0;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_HW_LRO);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id << 16;
+ arg1 = lro_bit_mask | temp;
+ cmd.req.arg[1] = arg1;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_info(&adapter->pdev->dev, "LRO config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ int err;
+ u32 word;
+ struct qlcnic_cmd_args cmd;
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_RSS);
+ if (err)
+ return err;
+ /*
+ * RSS request:
+ * bits 3-0: Rsvd
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 16-31: indirection table mask
+ */
+ word = ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u32)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u32)(enable & 0x1) << 8) |
+ ((0x7ULL) << 16);
+ cmd.req.arg[1] = (adapter->recv_ctx->context_id);
+ cmd.req.arg[2] = word;
+ memcpy(&cmd.req.arg[4], key, sizeof(key));
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err)
+ dev_info(&adapter->pdev->dev, "RSS config failed\n");
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *interface_id)
+{
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
+ } else {
+ if (!qlcnic_sriov_vf_check(adapter))
+ *interface_id = adapter->recv_ctx->context_id << 16;
+ }
+}
+
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ u16 vlan_id, u8 op)
+{
+ struct qlcnic_cmd_args *cmd = NULL;
+ struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+
+ if (vlan_id)
+ op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+
+ cmd->req.arg[1] = op | (1 << 8);
+ qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
+ cmd->req.arg[1] |= temp;
+ mv.vlan = vlan_id;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd->req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
+ return err;
+}
+
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+	u8 mac[ETH_ALEN];
+
+	memcpy(&mac, addr, ETH_ALEN);
+ qlcnic_83xx_sre_macaddr_change(adapter, mac, vlan_id, QLCNIC_MAC_ADD);
+}
+
+static void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 type, struct qlcnic_cmd_args *cmd)
+{
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ memcpy(&cmd->req.arg[2], mac, sizeof(u32));
+ memcpy(&cmd->req.arg[3], &mac[4], sizeof(u16));
+ break;
+ }
+ cmd->req.arg[1] = type;
+}
+
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ if (err)
+ return err;
+
+ qlcnic_83xx_configure_mac(adapter, mac, QLCNIC_GET_CURRENT_MAC, &cmd);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+ dev_err(&adapter->pdev->dev, "Failed to get mac address%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_83xx_set_rx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->recv_ctx->context_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_RX | temp << 16;
+ temp = coal->rx_time_us;
+ cmd.req.arg[2] = coal->rx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_set_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ struct qlcnic_cmd_args cmd;
+ u16 temp;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTR_COAL);
+ if (err)
+ return err;
+
+ temp = adapter->tx_ring->ctx_id;
+ cmd.req.arg[1] = QLCNIC_INTR_COAL_TYPE_TX | temp << 16;
+ temp = coal->tx_time_us;
+ cmd.req.arg[2] = coal->tx_packets | temp << 16;
+ cmd.req.arg[3] = coal->flag;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS)
+ netdev_err(adapter->netdev,
+ "failed to set interrupt coalescing parameters\n");
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Rx coalescing parameters\n");
+
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ if (err)
+ netdev_err(adapter->netdev,
+ "failed to set Tx coalescing parameters\n");
+
+ return err;
+}
+
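+/* Apply an ethtool coalescing request.  Only the side (Rx or Tx) whose
+ * parameters changed is reprogrammed; if both changed, both are.
+ */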
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u32 rx_coalesce_usecs, rx_max_frames;
+ u32 tx_coalesce_usecs, tx_max_frames;
+ int err;
+
+ if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
+ return -EIO;
+
+ tx_coalesce_usecs = ethcoal->tx_coalesce_usecs;
+ tx_max_frames = ethcoal->tx_max_coalesced_frames;
+ rx_coalesce_usecs = ethcoal->rx_coalesce_usecs;
+ rx_max_frames = ethcoal->rx_max_coalesced_frames;
+ coal->flag = QLCNIC_INTR_DEFAULT;
+
+ if ((coal->rx_time_us == rx_coalesce_usecs) &&
+ (coal->rx_packets == rx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_TX;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ } else if ((coal->tx_time_us == tx_coalesce_usecs) &&
+ (coal->tx_packets == tx_max_frames)) {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ } else {
+ coal->type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ coal->rx_time_us = rx_coalesce_usecs;
+ coal->rx_packets = rx_max_frames;
+ coal->tx_time_us = tx_coalesce_usecs;
+ coal->tx_packets = tx_max_frames;
+ }
+
+ switch (coal->type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ err = qlcnic_83xx_set_rx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ err = qlcnic_83xx_set_tx_intr_coal(adapter);
+ break;
+ case QLCNIC_INTR_COAL_TYPE_RX_TX:
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ break;
+ default:
+ err = -EINVAL;
+ netdev_err(adapter->netdev,
+ "Invalid Interrupt coalescing type\n");
+ break;
+ }
+
+ return err;
+}
+
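+/* Decode a link AEN: extract link state, speed, duplex, autoneg and
+ * module type from the mailbox payload and notify the stack.
+ */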
+static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
+ u32 data[])
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u8 link_status, duplex;
+ /* link speed */
+ link_status = LSB(data[3]) & 1;
+ if (link_status) {
+ ahw->link_speed = MSW(data[2]);
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ ahw->link_duplex = DUPLEX_FULL;
+ else
+ ahw->link_duplex = DUPLEX_HALF;
+ } else {
+ ahw->link_speed = SPEED_UNKNOWN;
+ ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+
+ ahw->link_autoneg = MSB(MSW(data[3]));
+ ahw->module_type = MSB(LSW(data[3]));
+ ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
+ qlcnic_advert_link_change(adapter, link_status);
+}
+
+static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
+{
+ u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_adapter *adapter = data;
+ struct qlcnic_mailbox *mbx;
+ unsigned long flags;
+
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
+ if (!(resp & QLCNIC_SET_OWNER))
+ goto out;
+
+ event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
+ __qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (mbx->rsp_status != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ else
+ adapter->stats.mbx_spurious_intr++;
+ }
+
+out:
+ mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
+ writel(0, adapter->ahw->pci_base0 + mask);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+ return IRQ_HANDLED;
+}
+
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int i, err = -EIO;
+ struct qlcnic_cmd_args cmd;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Error, invoked by non management func\n",
+ __func__);
+ return err;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (nic->pci_func << 16);
+ cmd.req.arg[2] = 0x1 << 16;
+ cmd.req.arg[3] = nic->phys_port | (nic->switch_mode << 16);
+ cmd.req.arg[4] = nic->capabilities;
+ cmd.req.arg[5] = (nic->max_mac_filters & 0xFF) | ((nic->max_mtu) << 16);
+ cmd.req.arg[6] = (nic->max_tx_ques) | ((nic->max_rx_ques) << 16);
+ cmd.req.arg[7] = (nic->min_tx_bw) | ((nic->max_tx_bw) << 16);
+ for (i = 8; i < 32; i++)
+ cmd.req.arg[i] = 0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev, "Failed to set nic info %d\n",
+			err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ u32 temp;
+ u8 op = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
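+	/* Requests for a function other than the caller are flagged with BIT_31 */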
+ if (func_id != ahw->pci_func) {
+ temp = func_id << 16;
+ cmd.req.arg[1] = op | BIT_31 | temp;
+ } else {
+ cmd.req.arg[1] = ahw->pci_func << 16;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to get nic info %d\n", err);
+ goto out;
+ }
+
+ npar_info->op_type = cmd.rsp.arg[1];
+ npar_info->pci_func = cmd.rsp.arg[2] & 0xFFFF;
+ npar_info->op_mode = (cmd.rsp.arg[2] & 0xFFFF0000) >> 16;
+ npar_info->phys_port = cmd.rsp.arg[3] & 0xFFFF;
+ npar_info->switch_mode = (cmd.rsp.arg[3] & 0xFFFF0000) >> 16;
+ npar_info->capabilities = cmd.rsp.arg[4];
+ npar_info->max_mac_filters = cmd.rsp.arg[5] & 0xFF;
+ npar_info->max_mtu = (cmd.rsp.arg[5] & 0xFFFF0000) >> 16;
+ npar_info->max_tx_ques = cmd.rsp.arg[6] & 0xFFFF;
+ npar_info->max_rx_ques = (cmd.rsp.arg[6] & 0xFFFF0000) >> 16;
+ npar_info->min_tx_bw = cmd.rsp.arg[7] & 0xFFFF;
+ npar_info->max_tx_bw = (cmd.rsp.arg[7] & 0xFFFF0000) >> 16;
+ if (cmd.rsp.arg[8] & 0x1)
+ npar_info->max_bw_reg_offset = (cmd.rsp.arg[8] & 0x7FFE) >> 1;
+ if (cmd.rsp.arg[8] & 0x10000) {
+ temp = (cmd.rsp.arg[8] & 0x7FFE0000) >> 17;
+ npar_info->max_linkspeed_reg_offset = temp;
+ }
+
+ memcpy(ahw->extra_capability, &cmd.rsp.arg[16],
+ sizeof(ahw->extra_capability));
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_get_pci_func_type(struct qlcnic_adapter *adapter, u16 type,
+ u16 *nic, u16 *fcoe, u16 *iscsi)
+{
+ struct device *dev = &adapter->pdev->dev;
+ int err = 0;
+
+ switch (type) {
+ case QLCNIC_TYPE_NIC:
+ (*nic)++;
+ break;
+ case QLCNIC_TYPE_FCOE:
+ (*fcoe)++;
+ break;
+ case QLCNIC_TYPE_ISCSI:
+ (*iscsi)++;
+ break;
+ default:
+ dev_err(dev, "%s: Unknown PCI type[%x]\n",
+ __func__, type);
+ err = -EIO;
+ }
+
+ return err;
+}
+
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_cmd_args cmd;
+ int i, err = 0, j = 0;
+ u32 temp;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ ahw->total_nic_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ ahw->max_pci_func = cmd.rsp.arg[1] & 0xFF;
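+		/*
+		 * Walk the per-function records in the response; inactive
+		 * functions carry only a fixed number of registers, which are
+		 * skipped via QLC_SKIP_INACTIVE_PCI_REGS.
+		 */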
+ for (i = 2, j = 0; j < ahw->max_vnic_func; j++, pci_info++) {
+ pci_info->id = cmd.rsp.arg[i] & 0xFFFF;
+ pci_info->active = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ i++;
+ if (!pci_info->active) {
+ i += QLC_SKIP_INACTIVE_PCI_REGS;
+ continue;
+ }
+ pci_info->type = cmd.rsp.arg[i] & 0xFFFF;
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->default_port = temp;
+ i++;
+ pci_info->tx_min_bw = cmd.rsp.arg[i] & 0xFFFF;
+ temp = (cmd.rsp.arg[i] & 0xFFFF0000) >> 16;
+ pci_info->tx_max_bw = temp;
+ i = i + 2;
+ memcpy(pci_info->mac, &cmd.rsp.arg[i], ETH_ALEN - 2);
+ i++;
+ memcpy(pci_info->mac + sizeof(u32), &cmd.rsp.arg[i], 2);
+ i = i + 3;
+ }
+ } else {
+ dev_err(dev, "Failed to get PCI Info, error = %d\n", err);
+ err = -EIO;
+ }
+
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(dev, "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
+{
+ int i, index, err;
+ u8 max_ints;
+ u32 val, temp, type;
+ struct qlcnic_cmd_args cmd;
+
+ max_ints = adapter->ahw->num_msix - 1;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = max_ints;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
+
+ for (i = 0, index = 2; i < max_ints; i++) {
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (adapter->ahw->intr_tbl[i].type << 4);
+ if (adapter->ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (adapter->ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[index++] = val;
+ }
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure interrupts 0x%x\n", err);
+ goto out;
+ }
+
+ max_ints = cmd.rsp.arg[1];
+ for (i = 0, index = 2; i < max_ints; i++, index += 2) {
+ val = cmd.rsp.arg[index];
+ if (LSB(val)) {
+ dev_info(&adapter->pdev->dev,
+ "Can't configure interrupt %d\n",
+ adapter->ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ adapter->ahw->intr_tbl[i].id = MSW(val);
+ adapter->ahw->intr_tbl[i].enabled = 1;
+ temp = cmd.rsp.arg[index + 1];
+ adapter->ahw->intr_tbl[i].src = temp;
+ } else {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *adapter)
+{
+ int id, timeout = 0;
+ u32 status = 0;
+
+ while (status == 0) {
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (status)
+ break;
+
+ if (++timeout >= QLC_83XX_FLASH_LOCK_TIMEOUT) {
+ id = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FLASH_LOCK_OWNER);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, lock held by %d\n", __func__, id);
+ return -EIO;
+ }
+ usleep_range(1000, 2000);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, adapter->portnum);
+ return 0;
+}
+
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *adapter)
+{
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER, 0xFF);
+}
+
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
+ u32 flash_addr, u8 *p_data,
+ int count)
+{
+ u32 word, range, flash_offset, addr = flash_addr, ret;
+ ulong indirect_add, direct_window;
+ int i, err = 0;
+
+ flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ return -EIO;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr & 0xFFFF0000));
+
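+	/*
+	 * Reads go through the flash direct window; the window register must
+	 * be re-pointed whenever the read crosses a sector boundary.
+	 */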
+ range = flash_offset + (count * sizeof(u32));
+ /* Check if data is spread across multiple sectors */
+	if (range > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+		/* Multi sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+
+ if (flash_offset > (QLCNIC_FLASH_SECTOR_SIZE - 1)) {
+ direct_window = QLC_83XX_FLASH_DIRECT_WINDOW;
+ /* This write is needed once for each sector */
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ direct_window,
+ (addr));
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < count; i++) {
+ indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+ int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+ int err = 0;
+
+ do {
+ status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
+ if ((status & QLC_83XX_FLASH_STATUS_READY) ==
+ QLC_83XX_FLASH_STATUS_READY)
+ break;
+
+ usleep_range(1000, 1100);
+ } while (--retries);
+
+ if (!retries)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
+{
+ int ret;
+	u32 cmd;
+
+	cmd = adapter->ahw->fdt.write_statusreg_cmd;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG | cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_enable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
+{
+ int ret;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ (QLC_83XX_FLASH_FDT_WRITE_DEF_SIG |
+ adapter->ahw->fdt.write_statusreg_cmd));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ adapter->ahw->fdt.write_disable_bits);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
+{
+ int ret, err = 0;
+ u32 mfg_id;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return err;
+ }
+
+ adapter->flash_mfg_id = (mfg_id & 0xFF);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *adapter)
+{
+ int count, fdt_size, ret = 0;
+
+ fdt_size = sizeof(struct qlcnic_fdt);
+ count = fdt_size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ memset(&adapter->ahw->fdt, 0, fdt_size);
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, QLCNIC_FDT_LOCATION,
+ (u8 *)&adapter->ahw->fdt,
+ count);
+ qlcnic_swap32_buffer((u32 *)&adapter->ahw->fdt, count);
+ qlcnic_83xx_unlock_flash(adapter);
+ return ret;
+}
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
+ u32 sector_start_addr)
+{
+ u32 reversed_addr, addr1, addr2, cmd;
+ int ret = -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
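+	/* Byte-swap the 24-bit sector address before handing it to WRDATA */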
+ addr1 = (sector_start_addr & 0xFF) << 16;
+ addr2 = (sector_start_addr & 0xFF0000) >> 16;
+ reversed_addr = addr1 | addr2 | (sector_start_addr & 0xFF00);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ reversed_addr);
+ cmd = QLC_83XX_FLASH_FDT_ERASE_DEF_SIG | adapter->ahw->fdt.erase_cmd;
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id)
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, cmd);
+ else
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_ERASE_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return ret;
+ }
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data)
+{
+ int ret = -EIO;
+ u32 addr1 = 0x00800000 | (addr >> 2);
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR, addr1);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_ERASE_MS_VAL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
+ u32 *p_data, int count)
+{
+ u32 temp;
+ int ret = -EIO, err = 0;
+
+ if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
+ (count > QLC_83XX_FLASH_WRITE_MAX)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid word count\n", __func__);
+ return -EIO;
+ }
+
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL);
+
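+	/*
+	 * The burst is programmed as first / middle / last DWORDs, each using
+	 * its own control pattern and polled to completion before the next
+	 * word is written.
+	 */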
+ /* First DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_FIRST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ count--;
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL);
+ /* Second to N-1 DWORD writes */
+ while (count != 1) {
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA,
+ *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_SECOND_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ count--;
+ }
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_ADDR_TEMP_VAL |
+ (addr >> 2));
+ /* Last DWORD write */
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_WRDATA, *p_data++);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_LAST_MS_PATTERN);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+
+ ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
+ if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
+ dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
+ __func__, __LINE__);
+ /* Operation failed, clear error bit */
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_SPI_CONTROL,
+ (temp | QLC_83XX_FLASH_SPI_CTRL));
+ }
+
+ return 0;
+}
+
+static void qlcnic_83xx_recover_driver_lock(struct qlcnic_adapter *adapter)
+{
+ u32 val, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+
+	/* Check if recovery needs to be performed by the calling function */
+ if ((val & QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK) == 0) {
+ val = val & ~0x3F;
+ val = val | ((adapter->portnum << 2) |
+ QLC_83XX_NEED_DRV_LOCK_RECOVERY);
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated\n", __func__);
+ mdelay(QLC_83XX_DRV_LOCK_RECOVERY_DELAY);
+ val = QLCRDX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK);
+ id = ((val >> 2) & 0xF);
+ if (id == adapter->portnum) {
+ val = val & ~QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK;
+ val = val | QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ /* Force release the lock */
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+ /* Clear recovery bits */
+ val = val & ~0x3F;
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery completed\n", __func__);
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: func %d to resume lock recovery process\n",
+ __func__, id);
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: lock recovery initiated by other functions\n",
+ __func__);
+ }
+}
+
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 lock_alive_counter, val, id, i = 0, status = 0, temp = 0;
+ int max_attempt = 0;
+
+ while (status == 0) {
+ status = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK);
+ if (status)
+ break;
+
+ mdelay(QLC_83XX_DRV_LOCK_WAIT_DELAY);
+ i++;
+
+ if (i == 1)
+ temp = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+
+ if (i == QLC_83XX_DRV_LOCK_WAIT_COUNTER) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ if (val == temp) {
+ id = val & 0xFF;
+ dev_info(&adapter->pdev->dev,
+ "%s: lock to be recovered from %d\n",
+ __func__, id);
+ qlcnic_83xx_recover_driver_lock(adapter);
+ i = 0;
+ max_attempt++;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
+		/* Force exit from the while loop after a few attempts */
+ if (max_attempt == QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed to get lock\n", __func__);
+ return -EIO;
+ }
+ }
+
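+	/* Lock acquired: bump the alive counter and record our port number */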
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ lock_alive_counter = val >> 8;
+ lock_alive_counter++;
+ val = lock_alive_counter << 8 | adapter->portnum;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+
+ return 0;
+}
+
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
+{
+ u32 val, lock_alive_counter, id;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = val & 0xFF;
+ lock_alive_counter = val >> 8;
+
+ if (id != adapter->portnum)
+ dev_err(&adapter->pdev->dev,
+			"%s: Warning, func %d is unlocking lock owned by %d\n",
+ __func__, adapter->portnum, id);
+
+ val = (lock_alive_counter << 8) | 0xFF;
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_LOCK_ID, val);
+ QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
+}
+
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+ u32 *data, u32 count)
+{
+ int i, j, ret = 0;
+ u32 temp;
+
+ /* Check alignment */
+ if (addr & 0xF)
+ return -EIO;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+
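+	/*
+	 * Each iteration writes one 16-byte (128-bit) chunk through the four
+	 * MS data registers, then kicks the TA write and polls for completion.
+	 */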
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX)) ||
+ (ADDR_IN_RANGE(addr, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failure */
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_WARNING
+ "MS memory write failed\n");
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return -EIO;
+ }
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
+ u8 *p_data, int count)
+{
+ u32 word, addr = flash_addr, ret;
+ ulong indirect_addr;
+ int i, err = 0;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0)
+ return -EIO;
+
+ if (addr & 0x3) {
+ dev_err(&adapter->pdev->dev, "Illegal addr = 0x%x\n", addr);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ for (i = 0; i < count; i++) {
+ if (qlcnic_83xx_wrt_reg_indirect(adapter,
+ QLC_83XX_FLASH_DIRECT_WINDOW,
+ (addr))) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
+ ret = QLCRD32(adapter, indirect_addr, &err);
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return err;
+ }
+
+ word = ret;
+ *(u32 *)p_data = word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u32 config;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ if (err)
+ return;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[3];
+
+ switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
+ case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
+ case QLC_83XX_MODULE_TP_1000BASE_T:
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ default:
+ ahw->port_type = QLCNIC_XGBE;
+ }
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
+{
+ u8 pci_func;
+ int err;
+ u32 config = 0, state;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ pci_func = adapter->portnum;
+ else
+ pci_func = ahw->pci_func;
+
+ state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
+ if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
+ dev_info(&adapter->pdev->dev, "link state down\n");
+ return config;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_LINK_STATUS);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Get Link Status Command failed: 0x%x\n", err);
+ goto out;
+ } else {
+ config = cmd.rsp.arg[1];
+ switch (QLC_83XX_CURRENT_LINK_SPEED(config)) {
+ case QLC_83XX_10M_LINK:
+ ahw->link_speed = SPEED_10;
+ break;
+ case QLC_83XX_100M_LINK:
+ ahw->link_speed = SPEED_100;
+ break;
+ case QLC_83XX_1G_LINK:
+ ahw->link_speed = SPEED_1000;
+ break;
+ case QLC_83XX_10G_LINK:
+ ahw->link_speed = SPEED_10000;
+ break;
+ default:
+ ahw->link_speed = 0;
+ break;
+ }
+ config = cmd.rsp.arg[3];
+ switch (QLC_83XX_SFP_MODULE_TYPE(config)) {
+ case QLC_83XX_MODULE_FIBRE_10GBASE_LRM:
+ case QLC_83XX_MODULE_FIBRE_10GBASE_LR:
+ case QLC_83XX_MODULE_FIBRE_10GBASE_SR:
+ ahw->supported_type = PORT_FIBRE;
+ ahw->port_type = QLCNIC_XGBE;
+ break;
+ case QLC_83XX_MODULE_FIBRE_1000BASE_SX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_LX:
+ case QLC_83XX_MODULE_FIBRE_1000BASE_CX:
+ ahw->supported_type = PORT_FIBRE;
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLC_83XX_MODULE_TP_1000BASE_T:
+ ahw->supported_type = PORT_TP;
+ ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLC_83XX_MODULE_DA_10GE_PASSIVE_CP:
+ case QLC_83XX_MODULE_DA_10GE_ACTIVE_CP:
+ case QLC_83XX_MODULE_DA_10GE_LEGACY_CP:
+ case QLC_83XX_MODULE_DA_1GE_PASSIVE_CP:
+ ahw->supported_type = PORT_DA;
+ ahw->port_type = QLCNIC_XGBE;
+ break;
+ default:
+ ahw->supported_type = PORT_OTHER;
+ ahw->port_type = QLCNIC_XGBE;
+ }
+ if (config & 1)
+ err = 1;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return config;
+}
+
+int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = 0;
+ int status = 0;
+ u32 supported, advertising;
+
+ if (!test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ /* Get port configuration info */
+ status = qlcnic_83xx_get_port_info(adapter);
+ /* Get Link Status related info */
+ config = qlcnic_83xx_test_link(adapter);
+ ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
+ }
+
+	/* Board type is hard-coded until there is a way to read it from flash */
+ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
+
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ ecmd->base.speed = ahw->link_speed;
+ ecmd->base.duplex = ahw->link_duplex;
+ ecmd->base.autoneg = ahw->link_autoneg;
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ }
+
+ supported = (SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_10000baseT_Full |
+ SUPPORTED_Autoneg);
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ ecmd->link_modes.advertising);
+
+ if (ecmd->base.autoneg == AUTONEG_ENABLE) {
+ if (ahw->port_config & QLC_83XX_10_CAPABLE)
+ advertising |= SUPPORTED_10baseT_Full;
+ if (ahw->port_config & QLC_83XX_100_CAPABLE)
+ advertising |= SUPPORTED_100baseT_Full;
+ if (ahw->port_config & QLC_83XX_1G_CAPABLE)
+ advertising |= SUPPORTED_1000baseT_Full;
+ if (ahw->port_config & QLC_83XX_10G_CAPABLE)
+ advertising |= SUPPORTED_10000baseT_Full;
+ if (ahw->port_config & QLC_83XX_AUTONEG_ENABLE)
+ advertising |= ADVERTISED_Autoneg;
+ } else {
+ switch (ahw->link_speed) {
+ case SPEED_10:
+ advertising = SUPPORTED_10baseT_Full;
+ break;
+ case SPEED_100:
+ advertising = SUPPORTED_100baseT_Full;
+ break;
+ case SPEED_1000:
+ advertising = SUPPORTED_1000baseT_Full;
+ break;
+ case SPEED_10000:
+ advertising = SUPPORTED_10000baseT_Full;
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ switch (ahw->supported_type) {
+ case PORT_FIBRE:
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
+ break;
+ case PORT_TP:
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ ecmd->base.port = PORT_TP;
+ break;
+ case PORT_DA:
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_DA;
+ break;
+ default:
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_OTHER;
+ break;
+ }
+ ecmd->base.phy_address = ahw->physical_port;
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
+
+ return status;
+}
+
+int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = adapter->ahw->port_config;
+ int status = 0;
+
+ /* 83xx devices do not support Half duplex */
+ if (ecmd->base.duplex == DUPLEX_HALF) {
+ netdev_info(adapter->netdev,
+ "Half duplex mode not supported\n");
+ return -EINVAL;
+ }
+
+ if (ecmd->base.autoneg) {
+ ahw->port_config |= QLC_83XX_AUTONEG_ENABLE;
+ ahw->port_config |= (QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ } else { /* force speed */
+ ahw->port_config &= ~QLC_83XX_AUTONEG_ENABLE;
+ switch (ecmd->base.speed) {
+ case SPEED_10:
+ ahw->port_config &= ~(QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_10_CAPABLE;
+ break;
+ case SPEED_100:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_1G_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_100_CAPABLE;
+ break;
+ case SPEED_1000:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_100_CAPABLE |
+ QLC_83XX_10G_CAPABLE);
+ ahw->port_config |= QLC_83XX_1G_CAPABLE;
+ break;
+ case SPEED_10000:
+ ahw->port_config &= ~(QLC_83XX_10_CAPABLE |
+ QLC_83XX_100_CAPABLE |
+ QLC_83XX_1G_CAPABLE);
+ ahw->port_config |= QLC_83XX_10G_CAPABLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ netdev_info(adapter->netdev,
+			    "Failed to set link speed and autoneg\n");
+ ahw->port_config = config;
+ }
+
+ return status;
+}
+
+static inline u64 *qlcnic_83xx_copy_stats(struct qlcnic_cmd_args *cmd,
+ u64 *data, int index)
+{
+ u32 low, hi;
+ u64 val;
+
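+	/* Each 64-bit counter arrives as a low/high pair of 32-bit mailbox words */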
+ low = cmd->rsp.arg[index];
+ hi = cmd->rsp.arg[index + 1];
+ val = (((u64) low) | (((u64) hi) << 32));
+ *data++ = val;
+ return data;
+}
+
+static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd, u64 *data,
+ int type, int *ret)
+{
+ int err, k, total_regs;
+
+ *ret = 0;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_info(&adapter->pdev->dev,
+ "Error in get statistics mailbox command\n");
+ *ret = -EIO;
+ return data;
+ }
+ total_regs = cmd->rsp.num;
+ switch (type) {
+ case QLC_83XX_STAT_MAC:
+ /* fill in MAC tx counters */
+ for (k = 2; k < 28; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx counters */
+ for (k += 6; k < 60; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 24 bytes of reserved area */
+ /* fill in MAC rx frame stats */
+ for (k += 6; k < 80; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* fill in eSwitch stats */
+ for (; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_RX:
+ for (k = 2; k < 8; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < 24; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes containing RE1FBQ error data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ case QLC_83XX_STAT_TX:
+ for (k = 2; k < 10; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ /* skip 8 bytes of reserved data */
+ for (k += 2; k < total_regs; k += 2)
+ data = qlcnic_83xx_copy_stats(cmd, data, k);
+ break;
+ default:
+ dev_warn(&adapter->pdev->dev, "Unknown get statistics mode\n");
+ *ret = -EIO;
+ }
+ return data;
+}
+
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
+{
+ struct qlcnic_cmd_args cmd;
+ struct net_device *netdev = adapter->netdev;
+ int ret = 0;
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
+ if (ret)
+ return;
+ /* Get Tx stats */
+ cmd.req.arg[1] = BIT_1 | (adapter->tx_ring->ctx_id << 16);
+ cmd.rsp.num = QLC_83XX_TX_STAT_REGS;
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_TX, &ret);
+ if (ret) {
+ netdev_err(netdev, "Error getting Tx stats\n");
+ goto out;
+ }
+ /* Get MAC stats */
+ cmd.req.arg[1] = BIT_2 | (adapter->portnum << 16);
+ cmd.rsp.num = QLC_83XX_MAC_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_MAC, &ret);
+ if (ret) {
+ netdev_err(netdev, "Error getting MAC stats\n");
+ goto out;
+ }
+ /* Get Rx stats */
+ cmd.req.arg[1] = adapter->recv_ctx->context_id << 16;
+ cmd.rsp.num = QLC_83XX_RX_STAT_REGS;
+ memset(cmd.rsp.arg, 0, sizeof(u32) * cmd.rsp.num);
+ data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
+ QLC_83XX_STAT_RX, &ret);
+ if (ret)
+ netdev_err(netdev, "Error getting Rx stats\n");
+out:
+ qlcnic_free_mbx_args(&cmd);
+}
+
+#define QLCNIC_83XX_ADD_PORT0 BIT_0
+#define QLCNIC_83XX_ADD_PORT1 BIT_1
+#define QLCNIC_83XX_EXTENDED_MEM_SIZE 13 /* In MB */
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = (QLCNIC_83XX_ADD_PORT0 | QLCNIC_83XX_ADD_PORT1);
+ cmd.req.arg[2] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+ cmd.req.arg[3] = QLCNIC_83XX_EXTENDED_MEM_SIZE;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "failed to issue extend iSCSI minidump capability\n");
+
+ return err;
+}
+
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *adapter)
+{
+ u32 major, minor, sub;
+
+ major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ sub = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (adapter->fw_version != QLCNIC_VERSION_CODE(major, minor, sub)) {
+ dev_info(&adapter->pdev->dev, "%s: Reg test failed\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+inline int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *adapter)
+{
+ return (ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl) *
+ sizeof(*adapter->ahw->ext_reg_tbl)) +
+ (ARRAY_SIZE(qlcnic_83xx_reg_tbl) *
+ sizeof(*adapter->ahw->reg_tbl));
+}
+
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *adapter, u32 *regs_buff)
+{
+ int i, j = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1;
+ j < ARRAY_SIZE(qlcnic_83xx_reg_tbl); i++, j++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, j);
+
+ for (j = 0; j < ARRAY_SIZE(qlcnic_83xx_ext_reg_tbl); j++)
+ regs_buff[i++] = QLCRDX(adapter->ahw, j);
+ return i;
+}
+
+int qlcnic_83xx_interrupt_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 val, drv_sds_rings = adapter->drv_sds_rings;
+ u8 drv_tx_rings = adapter->drv_tx_rings;
+ u32 data;
+ u16 intrpt_id, id;
+ int ret;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
+ if (qlcnic_get_diag_lock(adapter)) {
+ netdev_info(netdev, "Device in diagnostics mode\n");
+ return -EBUSY;
+ }
+
+ ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
+ drv_sds_rings);
+ if (ret)
+ goto fail_diag_irq;
+
+ ahw->diag_cnt = 0;
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ if (ret)
+ goto fail_mbx_args;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ intrpt_id = ahw->intr_tbl[0].id;
+ else
+ intrpt_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
+
+ cmd.req.arg[1] = 1;
+ cmd.req.arg[2] = intrpt_id;
+ cmd.req.arg[3] = BIT_0;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ data = cmd.rsp.arg[2];
+ id = LSW(data);
+ val = LSB(MSW(data));
+ if (id != intrpt_id)
+ dev_info(&adapter->pdev->dev,
+			 "Interrupt generated: 0x%x, requested: 0x%x\n",
+ id, intrpt_id);
+ if (val)
+ dev_err(&adapter->pdev->dev,
+ "Interrupt test error: 0x%x\n", val);
+ if (ret)
+ goto done;
+
+ msleep(20);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+
+fail_mbx_args:
+ qlcnic_83xx_diag_free_res(netdev, drv_sds_rings);
+
+fail_diag_irq:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ qlcnic_release_diag_lock(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed\n", __func__);
+ return;
+ }
+ config = ahw->port_config;
+ if (config & QLC_83XX_CFG_STD_PAUSE) {
+ switch (MSW(config)) {
+ case QLC_83XX_TX_PAUSE:
+ pause->tx_pause = 1;
+ break;
+ case QLC_83XX_RX_PAUSE:
+ pause->rx_pause = 1;
+ break;
+ case QLC_83XX_TX_RX_PAUSE:
+ default:
+ /* Backward compatibility for existing
+ * flash definitions
+ */
+ pause->tx_pause = 1;
+ pause->rx_pause = 1;
+ }
+ }
+
+ if (QLC_83XX_AUTONEG(config))
+ pause->autoneg = 1;
+}
+
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int status = 0;
+ u32 config;
+
+ status = qlcnic_83xx_get_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get Pause Config failed.\n", __func__);
+ return status;
+ }
+ config = ahw->port_config;
+
+ if (ahw->port_type == QLCNIC_GBE) {
+ if (pause->autoneg)
+ ahw->port_config |= QLC_83XX_ENABLE_AUTONEG;
+ if (!pause->autoneg)
+ ahw->port_config &= ~QLC_83XX_ENABLE_AUTONEG;
+ } else if ((ahw->port_type == QLCNIC_XGBE) && (pause->autoneg)) {
+ return -EOPNOTSUPP;
+ }
+
+ if (!(config & QLC_83XX_CFG_STD_PAUSE))
+ ahw->port_config |= QLC_83XX_CFG_STD_PAUSE;
+
+ if (pause->rx_pause && pause->tx_pause) {
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_RX_PAUSE;
+ } else if (pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_TX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_RX_PAUSE;
+ } else if (pause->tx_pause && !pause->rx_pause) {
+ ahw->port_config &= ~QLC_83XX_CFG_STD_RX_PAUSE;
+ ahw->port_config |= QLC_83XX_CFG_STD_TX_PAUSE;
+ } else if (!pause->rx_pause && !pause->tx_pause) {
+ ahw->port_config &= ~(QLC_83XX_CFG_STD_TX_RX_PAUSE |
+ QLC_83XX_CFG_STD_PAUSE);
+ }
+ status = qlcnic_83xx_set_port_config(adapter);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Set Pause Config failed.\n", __func__);
+ ahw->port_config = config;
+ }
+ return status;
+}
+
+static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
+{
+ int ret, err = 0;
+ u32 temp;
+
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
+ QLC_83XX_FLASH_OEM_READ_SIG);
+ qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_CONTROL,
+ QLC_83XX_FLASH_READ_CTRL);
+ ret = qlcnic_83xx_poll_flash_status_reg(adapter);
+ if (ret)
+ return -EIO;
+
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO)
+ return err;
+
+ return temp & 0xFF;
+}
+
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
+{
+ int status;
+
+ status = qlcnic_83xx_read_flash_status_reg(adapter);
+ if (status == -EIO) {
+ dev_info(&adapter->pdev->dev, "%s: EEPROM test failed.\n",
+ __func__);
+ return 1;
+ }
+ return 0;
+}
+
+static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+ return pci_save_state(pdev);
+}
+
+static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int err = 0;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ return err;
+
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE) {
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ qlcnic_83xx_set_vnic_opmode(adapter);
+ } else {
+ err = qlcnic_83xx_check_vnic_state(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ err = qlcnic_83xx_idc_reattach_driver(adapter);
+ if (err)
+ return err;
+
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ idc->delay);
+ return err;
+}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ reinit_completion(&mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ if (!mbx)
+ return;
+
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock_bh(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock_bh(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock_bh(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock_bh(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
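+	/*
+	 * Regular commands are written verbatim into the host mailbox
+	 * registers; back-channel posts prepend a header word and copy the
+	 * BC header and payload after it.
+	 */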
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+		/* Back-channel specific operation bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (!mbx)
+ return;
+
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
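+	/*
+	 * Commands are only queued while the mailbox is ready; the caller's
+	 * wait timeout scales with the current queue depth.
+	 */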
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock_bh(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock_bh(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+
+ return;
+}
+
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 offset;
+
+ offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+	dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x\n",
+ readl(ahw->pci_base0 + offset),
+ QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+ QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+ QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ const struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+ unsigned long flags;
+
+ ahw = adapter->ahw;
+
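+	/*
+	 * Drain the command queue: encode each command, signal firmware, wait
+	 * (with timeout) for the response to complete the mailbox, then
+	 * dequeue and wake the submitter.
+	 */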
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ spin_lock_irqsave(&mbx->aen_lock, flags);
+ mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT;
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
+
+ spin_lock_bh(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock_bh(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock_bh(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mailbox_registers(adapter);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static const struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+ if (mbx->work_q == NULL) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_83xx_aer_stop_poll_work(adapter);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err = 0;
+
+ pdev->error_state = pci_channel_io_normal;
+ err = pci_enable_device(pdev);
+ if (err)
+ goto disconnect;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = qlcnic_83xx_aer_reset(adapter);
+ if (err == 0)
+ return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_83xx_aer_start_poll_work(adapter);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
new file mode 100644
index 000000000..23cd47d58
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -0,0 +1,666 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#ifndef __QLCNIC_83XX_HW_H
+#define __QLCNIC_83XX_HW_H
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+
+#include "qlcnic_hw.h"
+
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+
+/* Directly mapped registers */
+#define QLC_83XX_CRB_WIN_BASE 0x3800
+#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
+#define QLC_83XX_SEM_LOCK_BASE 0x3840
+#define QLC_83XX_SEM_UNLOCK_BASE 0x3844
+#define QLC_83XX_SEM_LOCK_FUNC(f) (QLC_83XX_SEM_LOCK_BASE+((f)*8))
+#define QLC_83XX_SEM_UNLOCK_FUNC(f) (QLC_83XX_SEM_UNLOCK_BASE+((f)*8))
+#define QLC_83XX_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLC_83XX_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLC_83XX_LINK_SPEED_FACTOR 10
+#define QLC_83xx_FUNC_VAL(v, f) ((v) & (1 << (f * 4)))
+#define QLC_83XX_INTX_PTR 0x38C0
+#define QLC_83XX_INTX_TRGR 0x38C4
+#define QLC_83XX_INTX_MASK 0x38C8
+
+#define QLC_83XX_DRV_LOCK_WAIT_COUNTER 100
+#define QLC_83XX_DRV_LOCK_WAIT_DELAY 20
+#define QLC_83XX_NEED_DRV_LOCK_RECOVERY 1
+#define QLC_83XX_DRV_LOCK_RECOVERY_IN_PROGRESS 2
+#define QLC_83XX_MAX_DRV_LOCK_RECOVERY_ATTEMPT 3
+#define QLC_83XX_DRV_LOCK_RECOVERY_DELAY 200
+#define QLC_83XX_DRV_LOCK_RECOVERY_STATUS_MASK 0x3
+#define QLC_83XX_LB_WAIT_COUNT 250
+#define QLC_83XX_LB_MSLEEP_COUNT 20
+#define QLC_83XX_NO_NIC_RESOURCE 0x5
+#define QLC_83XX_MAC_PRESENT 0xC
+#define QLC_83XX_MAC_ABSENT 0xD
+
+
+#define QLC_83XX_FLASH_SECTOR_SIZE (64 * 1024)
+
+/* PEG status definitions */
+#define QLC_83XX_CMDPEG_COMPLETE 0xff01
+#define QLC_83XX_VALID_INTX_BIT30(val) ((val) & BIT_30)
+#define QLC_83XX_VALID_INTX_BIT31(val) ((val) & BIT_31)
+#define QLC_83XX_INTX_FUNC(val) ((val) & 0xFF)
+#define QLC_83XX_LEGACY_INTX_MAX_RETRY 100
+#define QLC_83XX_LEGACY_INTX_DELAY 4
+#define QLC_83XX_REG_DESC 1
+#define QLC_83XX_LRO_DESC 2
+#define QLC_83XX_CTRL_DESC 3
+#define QLC_83XX_FW_CAPABILITY_TSO BIT_6
+#define QLC_83XX_FW_CAP_LRO_MSS BIT_17
+#define QLC_83XX_HOST_RDS_MODE_UNIQUE 0
+#define QLC_83XX_HOST_SDS_MBX_IDX 8
+
+#define QLCNIC_HOST_RDS_MBX_IDX 88
+
+/* Pause control registers */
+#define QLC_83XX_SRE_SHIM_REG 0x0D200284
+#define QLC_83XX_PORT0_THRESHOLD 0x0B2003A4
+#define QLC_83XX_PORT1_THRESHOLD 0x0B2013A4
+#define QLC_83XX_PORT0_TC_MC_REG 0x0B200388
+#define QLC_83XX_PORT1_TC_MC_REG 0x0B201388
+#define QLC_83XX_PORT0_TC_STATS 0x0B20039C
+#define QLC_83XX_PORT1_TC_STATS 0x0B20139C
+#define QLC_83XX_PORT2_IFB_THRESHOLD 0x0B200704
+#define QLC_83XX_PORT3_IFB_THRESHOLD 0x0B201704
+
+/* Peg PC status registers */
+#define QLC_83XX_CRB_PEG_NET_0 0x3400003c
+#define QLC_83XX_CRB_PEG_NET_1 0x3410003c
+#define QLC_83XX_CRB_PEG_NET_2 0x3420003c
+#define QLC_83XX_CRB_PEG_NET_3 0x3430003c
+#define QLC_83XX_CRB_PEG_NET_4 0x34b0003c
+
+/* Firmware image definitions */
+#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_83XX_POST_FW_FILE_NAME "83xx_post_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
+#define QLC_83XX_BOOT_FROM_FLASH 0
+#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+
+#define QLC_FW_FILE_NAME_LEN 20
+#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+
+#define QLC_83XX_MBX_POST_BC_OP 0x1
+#define QLC_83XX_MBX_COMPLETION 0x0
+#define QLC_83XX_MBX_REQUEST 0x1
+
+#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP 5000000
+
+/* status descriptor mailbox data
+ * @phy_addr_{low|high}: physical address of buffer
+ * @sds_ring_size: buffer size
+ * @intrpt_id: interrupt id
+ * @intrpt_val: source of interrupt
+ */
+struct qlcnic_sds_mbx {
+ u32 phy_addr_low;
+ u32 phy_addr_high;
+ u32 rsvd1[4];
+#if defined(__LITTLE_ENDIAN)
+ u16 sds_ring_size;
+ u16 rsvd2;
+ u16 rsvd3[2];
+ u16 intrpt_id;
+ u8 intrpt_val;
+ u8 rsvd4;
+#elif defined(__BIG_ENDIAN)
+ u16 rsvd2;
+ u16 sds_ring_size;
+ u16 rsvd3[2];
+ u8 rsvd4;
+ u8 intrpt_val;
+ u16 intrpt_id;
+#endif
+ u32 rsvd5;
+} __packed;
+
+/* receive descriptor buffer data
+ * @phy_addr_reg_{low|high}: physical address of regular buffer
+ * @phy_addr_jmb_{low|high}: physical address of jumbo buffer
+ * @reg_ring_sz: size of regular buffer
+ * @reg_ring_len: no. of entries in regular buffer
+ * @jmb_ring_len: no. of entries in jumbo buffer
+ * @jmb_ring_sz: size of jumbo buffer
+ */
+struct qlcnic_rds_mbx {
+ u32 phy_addr_reg_low;
+ u32 phy_addr_reg_high;
+ u32 phy_addr_jmb_low;
+ u32 phy_addr_jmb_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 reg_ring_sz;
+ u16 reg_ring_len;
+ u16 jmb_ring_sz;
+ u16 jmb_ring_len;
+#elif defined(__BIG_ENDIAN)
+ u16 reg_ring_len;
+ u16 reg_ring_sz;
+ u16 jmb_ring_len;
+ u16 jmb_ring_sz;
+#endif
+} __packed;
+
+/* host producers for regular and jumbo rings */
+struct __host_producer_mbx {
+ u32 reg_buf;
+ u32 jmb_buf;
+} __packed;
+
+/* Receive context mailbox data outbox registers
+ * @state: state of the context
+ * @vport_id: virtual port id
+ * @context_id: receive context id
+ * @num_pci_func: number of pci functions of the port
+ * @phy_port: physical port id
+ */
+struct qlcnic_rcv_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+ u8 state;
+ u8 num_pci_func;
+ u8 phy_port;
+ u8 vport_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+ u8 vport_id;
+ u8 phy_port;
+ u8 num_pci_func;
+ u8 state;
+#endif
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+struct qlcnic_add_rings_mbx_out {
+#if defined(__LITTLE_ENDIAN)
+ u8 rcv_num;
+ u8 sts_num;
+ u16 ctx_id;
+#elif defined(__BIG_ENDIAN)
+ u16 ctx_id;
+ u8 sts_num;
+ u8 rcv_num;
+#endif
+ u32 host_csmr[QLCNIC_MAX_SDS_RINGS];
+ struct __host_producer_mbx host_prod[QLCNIC_MAX_SDS_RINGS];
+} __packed;
+
+/* Transmit context mailbox inbox registers
+ * @phys_addr_{low|high}: DMA address of the transmit buffer
+ * @cnsmr_index_{low|high}: host consumer index
+ * @size: length of transmit buffer ring
+ * @intr_id: interrupt id
+ * @src: src of interrupt
+ */
+struct qlcnic_tx_mbx {
+ u32 phys_addr_low;
+ u32 phys_addr_high;
+ u32 cnsmr_index_low;
+ u32 cnsmr_index_high;
+#if defined(__LITTLE_ENDIAN)
+ u16 size;
+ u16 intr_id;
+ u8 src;
+ u8 rsvd[3];
+#elif defined(__BIG_ENDIAN)
+ u16 intr_id;
+ u16 size;
+ u8 rsvd[3];
+ u8 src;
+#endif
+} __packed;
+
+/* Transmit context mailbox outbox registers
+ * @host_prod: host producer index
+ * @ctx_id: transmit context id
+ * @state: state of the transmit context
+ */
+
+struct qlcnic_tx_mbx_out {
+ u32 host_prod;
+#if defined(__LITTLE_ENDIAN)
+ u16 ctx_id;
+ u8 state;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 state;
+ u16 ctx_id;
+#endif
+} __packed;
+
+struct qlcnic_intrpt_config {
+ u8 type;
+ u8 enabled;
+ u16 id;
+ u32 src;
+};
+
+struct qlcnic_macvlan_mbx {
+#if defined(__LITTLE_ENDIAN)
+ u8 mac_addr0;
+ u8 mac_addr1;
+ u8 mac_addr2;
+ u8 mac_addr3;
+ u8 mac_addr4;
+ u8 mac_addr5;
+ u16 vlan;
+#elif defined(__BIG_ENDIAN)
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
+ u16 vlan;
+ u8 mac_addr5;
+ u8 mac_addr4;
+#endif
+};
+
+struct qlc_83xx_fw_info {
+ const struct firmware *fw;
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
+};
+
+struct qlc_83xx_reset {
+ struct qlc_83xx_reset_hdr *hdr;
+ int seq_index;
+ int seq_error;
+ int array_index;
+ u32 array[QLC_83XX_MAX_RESET_SEQ_ENTRIES];
+ u8 *buff;
+ u8 *stop_offset;
+ u8 *start_offset;
+ u8 *init_offset;
+ u8 seq_end;
+ u8 template_end;
+};
+
+#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
+#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
+#define QLC_83XX_IDC_TIMESTAMP 0
+#define QLC_83XX_IDC_DURATION 1
+#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
+#define QLC_83XX_IDC_RESET_ACK_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_RESET_TIMEOUT_SECS 10
+#define QLC_83XX_IDC_QUIESCE_ACK_TIMEOUT_SECS 20
+#define QLC_83XX_IDC_FW_POLL_DELAY (1 * HZ)
+#define QLC_83XX_IDC_FW_FAIL_THRESH 2
+#define QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO 8
+#define QLC_83XX_IDC_MAX_CNA_FUNCTIONS 16
+#define QLC_83XX_IDC_MAJOR_VERSION 1
+#define QLC_83XX_IDC_MINOR_VERSION 0
+#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
+
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+struct qlc_83xx_idc {
+ int (*state_entry) (struct qlcnic_adapter *);
+ u64 sec_counter;
+ u64 delay;
+ unsigned long status;
+ int err_code;
+ int collect_dump;
+ u8 curr_state;
+ u8 prev_state;
+ u8 vnic_state;
+ u8 vnic_wait_limit;
+ u8 quiesce_req;
+ u8 delay_reset;
+ char **name;
+};
+
+enum qlcnic_vlan_operations {
+ QLC_VLAN_ADD = 0,
+ QLC_VLAN_DELETE
+};
+
+/* Device States */
+enum qlcnic_83xx_states {
+ QLC_83XX_IDC_DEV_UNKNOWN,
+ QLC_83XX_IDC_DEV_COLD,
+ QLC_83XX_IDC_DEV_INIT,
+ QLC_83XX_IDC_DEV_READY,
+ QLC_83XX_IDC_DEV_NEED_RESET,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT,
+ QLC_83XX_IDC_DEV_FAILED,
+ QLC_83XX_IDC_DEV_QUISCENT
+};
+
+#define QLCNIC_MBX_RSP(reg) LSW(reg)
+#define QLCNIC_MBX_NUM_REGS(reg) (MSW(reg) & 0x1FF)
+#define QLCNIC_MBX_STATUS(reg) (((reg) >> 25) & 0x7F)
+#define QLCNIC_MBX_HOST(ahw, i) ((ahw)->pci_base0 + ((i) * 4))
+#define QLCNIC_MBX_FW(ahw, i) ((ahw)->pci_base0 + 0x800 + ((i) * 4))
+
+/* Mailbox process AEN count */
+#define QLC_83XX_IDC_COMP_AEN 3
+#define QLC_83XX_MBX_AEN_CNT 5
+#define QLC_83XX_MODULE_LOADED 1
+#define QLC_83XX_MBX_READY 2
+#define QLC_83XX_MBX_AEN_ACK 3
+#define QLC_83XX_SFP_PRESENT(data) ((data) & 3)
+#define QLC_83XX_SFP_ERR(data) (((data) >> 2) & 3)
+#define QLC_83XX_SFP_MODULE_TYPE(data) (((data) >> 4) & 0x1F)
+#define QLC_83XX_SFP_CU_LENGTH(data) (LSB((data) >> 16))
+#define QLC_83XX_SFP_TX_FAULT(data) ((data) & BIT_10)
+#define QLC_83XX_LINK_STATS(data) ((data) & BIT_0)
+#define QLC_83XX_CURRENT_LINK_SPEED(data) (((data) >> 3) & 7)
+#define QLC_83XX_LINK_PAUSE(data) (((data) >> 6) & 3)
+#define QLC_83XX_LINK_LB(data) (((data) >> 8) & 7)
+#define QLC_83XX_LINK_FEC(data) ((data) & BIT_12)
+#define QLC_83XX_LINK_EEE(data) ((data) & BIT_13)
+#define QLC_83XX_DCBX(data) (((data) >> 28) & 7)
+#define QLC_83XX_AUTONEG(data) ((data) & BIT_15)
+#define QLC_83XX_TX_PAUSE 0x10
+#define QLC_83XX_RX_PAUSE 0x20
+#define QLC_83XX_TX_RX_PAUSE 0x30
+#define QLC_83XX_CFG_STD_PAUSE (1 << 5)
+#define QLC_83XX_CFG_STD_TX_PAUSE (1 << 20)
+#define QLC_83XX_CFG_STD_RX_PAUSE (2 << 20)
+#define QLC_83XX_CFG_STD_TX_RX_PAUSE (3 << 20)
+#define QLC_83XX_ENABLE_AUTONEG (1 << 15)
+#define QLC_83XX_CFG_LOOPBACK_HSS (2 << 1)
+#define QLC_83XX_CFG_LOOPBACK_PHY (3 << 1)
+#define QLC_83XX_CFG_LOOPBACK_EXT (4 << 1)
+
+/* LED configuration settings */
+#define QLC_83XX_ENABLE_BEACON 0xe
+#define QLC_83XX_BEACON_ON 1
+#define QLC_83XX_BEACON_OFF 0
+#define QLC_83XX_LED_RATE 0xff
+#define QLC_83XX_LED_ACT (1 << 10)
+#define QLC_83XX_LED_MOD (0 << 13)
+#define QLC_83XX_LED_CONFIG (QLC_83XX_LED_RATE | QLC_83XX_LED_ACT | \
+ QLC_83XX_LED_MOD)
+
+#define QLC_83XX_10M_LINK 1
+#define QLC_83XX_100M_LINK 2
+#define QLC_83XX_1G_LINK 3
+#define QLC_83XX_10G_LINK 4
+#define QLC_83XX_STAT_TX 3
+#define QLC_83XX_STAT_RX 2
+#define QLC_83XX_STAT_MAC 1
+#define QLC_83XX_TX_STAT_REGS 14
+#define QLC_83XX_RX_STAT_REGS 40
+#define QLC_83XX_MAC_STAT_REGS 94
+
+#define QLC_83XX_GET_FUNC_PRIVILEGE(VAL, FN) (0x3 & ((VAL) >> (FN * 2)))
+#define QLC_83XX_SET_FUNC_OPMODE(VAL, FN) ((VAL) << (FN * 2))
+#define QLC_83XX_DEFAULT_OPMODE 0x55555555
+#define QLC_83XX_PRIVLEGED_FUNC 0x1
+#define QLC_83XX_VIRTUAL_FUNC 0x2
+
+#define QLC_83XX_LB_MAX_FILTERS 2048
+#define QLC_83XX_LB_BUCKET_SIZE 256
+#define QLC_83XX_MINIMUM_VECTOR 3
+#define QLC_83XX_MAX_MC_COUNT 38
+#define QLC_83XX_MAX_UC_COUNT 4096
+
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
+#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
+#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
+#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
+#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
+#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
+#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
+#define QLC_83XX_SRIOV_MODE 0x1
+#define QLCNIC_BRDTYPE_83XX_10G 0x0083
+
+#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
+#define QLC_83XX_FLASH_SPI_CONTROL 0x2808E014
+#define QLC_83XX_FLASH_STATUS 0x42100004
+#define QLC_83XX_FLASH_CONTROL 0x42110004
+#define QLC_83XX_FLASH_ADDR 0x42110008
+#define QLC_83XX_FLASH_WRDATA 0x4211000C
+#define QLC_83XX_FLASH_RDDATA 0x42110018
+#define QLC_83XX_FLASH_DIRECT_WINDOW 0x42110030
+#define QLC_83XX_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+#define QLC_83XX_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLC_83XX_FLASH_WRITE_CMD 0xdacdacda
+#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
+#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
+#define QLC_83XX_FLASH_STATUS_READY 0x6
+#define QLC_83XX_FLASH_WRITE_MIN 2
+#define QLC_83XX_FLASH_WRITE_MAX 64
+#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLC_83XX_ERASE_MODE 1
+#define QLC_83XX_WRITE_MODE 2
+#define QLC_83XX_BULK_WRITE_MODE 3
+#define QLC_83XX_FLASH_FDT_WRITE_DEF_SIG 0xFD0100
+#define QLC_83XX_FLASH_FDT_ERASE_DEF_SIG 0xFD0300
+#define QLC_83XX_FLASH_FDT_READ_MFG_ID_VAL 0xFD009F
+#define QLC_83XX_FLASH_OEM_ERASE_SIG 0xFD03D8
+#define QLC_83XX_FLASH_OEM_WRITE_SIG 0xFD0101
+#define QLC_83XX_FLASH_OEM_READ_SIG 0xFD0005
+#define QLC_83XX_FLASH_ADDR_TEMP_VAL 0x00800000
+#define QLC_83XX_FLASH_ADDR_SECOND_TEMP_VAL 0x00800001
+#define QLC_83XX_FLASH_WRDATA_DEF 0x0
+#define QLC_83XX_FLASH_READ_CTRL 0x3F
+#define QLC_83XX_FLASH_SPI_CTRL 0x4
+#define QLC_83XX_FLASH_FIRST_ERASE_MS_VAL 0x2
+#define QLC_83XX_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLC_83XX_FLASH_LAST_ERASE_MS_VAL 0x3D
+#define QLC_83XX_FLASH_FIRST_MS_PATTERN 0x43
+#define QLC_83XX_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLC_83XX_FLASH_LAST_MS_PATTERN 0x7D
+#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
+#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+
+enum qlc_83xx_mbx_cmd_type {
+ QLC_83XX_MBX_CMD_WAIT = 0,
+ QLC_83XX_MBX_CMD_NO_WAIT,
+ QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+ QLC_83XX_MBX_RESPONSE_WAIT = 0,
+ QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
+/* Additional registers in 83xx */
+enum qlc_83xx_ext_regs {
+ QLCNIC_GLOBAL_RESET = 0,
+ QLCNIC_WILDCARD,
+ QLCNIC_INFORMANT,
+ QLCNIC_HOST_MBX_CTRL,
+ QLCNIC_FW_MBX_CTRL,
+ QLCNIC_BOOTLOADER_ADDR,
+ QLCNIC_BOOTLOADER_SIZE,
+ QLCNIC_FW_IMAGE_ADDR,
+ QLCNIC_MBX_INTR_ENBL,
+ QLCNIC_DEF_INT_MASK,
+ QLCNIC_DEF_INT_ID,
+ QLC_83XX_IDC_MAJ_VERSION,
+ QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DRV_PRESENCE,
+ QLC_83XX_IDC_DRV_ACK,
+ QLC_83XX_IDC_CTRL,
+ QLC_83XX_IDC_DRV_AUDIT,
+ QLC_83XX_IDC_MIN_VERSION,
+ QLC_83XX_RECOVER_DRV_LOCK,
+ QLC_83XX_IDC_PF_0,
+ QLC_83XX_IDC_PF_1,
+ QLC_83XX_IDC_PF_2,
+ QLC_83XX_IDC_PF_3,
+ QLC_83XX_IDC_PF_4,
+ QLC_83XX_IDC_PF_5,
+ QLC_83XX_IDC_PF_6,
+ QLC_83XX_IDC_PF_7,
+ QLC_83XX_IDC_PF_8,
+ QLC_83XX_IDC_PF_9,
+ QLC_83XX_IDC_PF_10,
+ QLC_83XX_IDC_PF_11,
+ QLC_83XX_IDC_PF_12,
+ QLC_83XX_IDC_PF_13,
+ QLC_83XX_IDC_PF_14,
+ QLC_83XX_IDC_PF_15,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_1,
+ QLC_83XX_IDC_DEV_PARTITION_INFO_2,
+ QLC_83XX_DRV_OP_MODE,
+ QLC_83XX_VNIC_STATE,
+ QLC_83XX_DRV_LOCK,
+ QLC_83XX_DRV_UNLOCK,
+ QLC_83XX_DRV_LOCK_ID,
+ QLC_83XX_ASIC_TEMP,
+};
+
+/* Initialize/Stop NIC command bit definitions */
+#define QLC_REGISTER_LB_IDC BIT_0
+#define QLC_REGISTER_DCB_AEN BIT_1
+#define QLC_83XX_MULTI_TENANCY_INFO BIT_29
+#define QLC_INIT_FW_RESOURCES BIT_31
+
+/* 83xx functions */
+int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
+void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
+int qlcnic_send_ctrl_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *, u32);
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
+void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
+int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
+int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
+int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
+int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *, int);
+void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
+int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
+int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
+int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *);
+void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
+ struct qlcnic_info *);
+int qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_83xx_set_rx_tx_intr_coal(struct qlcnic_adapter *);
+int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
+irqreturn_t qlcnic_83xx_intr(int, void *);
+irqreturn_t qlcnic_83xx_tmp_intr(int, void *);
+void qlcnic_83xx_check_vf(struct qlcnic_adapter *,
+ const struct pci_device_id *);
+int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_free_mbx_intr(struct qlcnic_adapter *);
+void qlcnic_83xx_register_map(struct qlcnic_hardware_context *);
+void qlcnic_83xx_idc_aen_work(struct work_struct *);
+void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *, __be32, int);
+
+int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *, u32, u32 *, int);
+int qlcnic_83xx_flash_write32(struct qlcnic_adapter *, u32, u32 *);
+int qlcnic_83xx_lock_flash(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_flash(struct qlcnic_adapter *);
+int qlcnic_83xx_save_flash_status(struct qlcnic_adapter *);
+int qlcnic_83xx_restore_flash_status(struct qlcnic_adapter *, int);
+int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *);
+int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
+int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
+int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
+ u32, u8 *, int);
+int qlcnic_83xx_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *);
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
+int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
+void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u8);
+int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *, int, int *);
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
+void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
+int qlcnic_83xx_extend_md_capab(struct qlcnic_adapter *);
+int qlcnic_83xx_get_link_ksettings(struct qlcnic_adapter *adapter,
+ struct ethtool_link_ksettings *ecmd);
+int qlcnic_83xx_set_link_ksettings(struct qlcnic_adapter *adapter,
+ const struct ethtool_link_ksettings *ecmd);
+void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *,
+ struct ethtool_pauseparam *);
+int qlcnic_83xx_test_link(struct qlcnic_adapter *);
+void qlcnic_83xx_get_port_type(struct qlcnic_adapter *adapter);
+int qlcnic_83xx_reg_test(struct qlcnic_adapter *);
+int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
+int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
+int qlcnic_83xx_loopback_test(struct net_device *, u8);
+int qlcnic_83xx_interrupt_test(struct net_device *);
+int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
+int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
+int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
+int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
+void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
new file mode 100644
index 000000000..c95d56e56
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -0,0 +1,2600 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+/* Reset template definitions */
+#define QLC_83XX_RESTART_TEMPLATE_SIZE 0x2000
+#define QLC_83XX_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLC_83XX_RESET_SEQ_VERSION 0x0101
+
+#define QLC_83XX_OPCODE_NOP 0x0000
+#define QLC_83XX_OPCODE_WRITE_LIST 0x0001
+#define QLC_83XX_OPCODE_READ_WRITE_LIST 0x0002
+#define QLC_83XX_OPCODE_POLL_LIST 0x0004
+#define QLC_83XX_OPCODE_POLL_WRITE_LIST 0x0008
+#define QLC_83XX_OPCODE_READ_MODIFY_WRITE 0x0010
+#define QLC_83XX_OPCODE_SEQ_PAUSE 0x0020
+#define QLC_83XX_OPCODE_SEQ_END 0x0040
+#define QLC_83XX_OPCODE_TMPL_END 0x0080
+#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
+
+/* EPORT control registers */
+#define QLC_83XX_RESET_CONTROL 0x28084E50
+#define QLC_83XX_RESET_REG 0x28084E60
+#define QLC_83XX_RESET_PORT0 0x28084E70
+#define QLC_83XX_RESET_PORT1 0x28084E80
+#define QLC_83XX_RESET_PORT2 0x28084E90
+#define QLC_83XX_RESET_PORT3 0x28084EA0
+#define QLC_83XX_RESET_SRESHIM 0x28084EB0
+#define QLC_83XX_RESET_EPGSHIM 0x28084EC0
+#define QLC_83XX_RESET_ETHERPCS 0x28084ED0
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *);
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
+
+/* Template header */
+struct qlc_83xx_reset_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u16 version;
+ u16 signature;
+ u16 size;
+ u16 entries;
+ u16 hdr_size;
+ u16 checksum;
+ u16 init_offset;
+ u16 start_offset;
+#elif defined(__BIG_ENDIAN)
+ u16 signature;
+ u16 version;
+ u16 entries;
+ u16 size;
+ u16 checksum;
+ u16 hdr_size;
+ u16 start_offset;
+ u16 init_offset;
+#endif
+} __packed;
+
+/* Command entry header. */
+struct qlc_83xx_entry_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u16 cmd;
+ u16 size;
+ u16 count;
+ u16 delay;
+#elif defined(__BIG_ENDIAN)
+ u16 size;
+ u16 cmd;
+ u16 delay;
+ u16 count;
+#endif
+} __packed;
+
+/* Generic poll command */
+struct qlc_83xx_poll {
+ u32 mask;
+ u32 status;
+} __packed;
+
+/* Read modify write command */
+struct qlc_83xx_rmw {
+ u32 mask;
+ u32 xor_value;
+ u32 or_value;
+#if defined(__LITTLE_ENDIAN)
+ u8 shl;
+ u8 shr;
+ u8 index_a;
+ u8 rsvd;
+#elif defined(__BIG_ENDIAN)
+ u8 rsvd;
+ u8 index_a;
+ u8 shr;
+ u8 shl;
+#endif
+} __packed;
+
+/* Generic command with 2 DWORD */
+struct qlc_83xx_entry {
+ u32 arg1;
+ u32 arg2;
+} __packed;
+
+/* Generic command with 4 DWORD */
+struct qlc_83xx_quad_entry {
+ u32 dr_addr;
+ u32 dr_value;
+ u32 ar_addr;
+ u32 ar_value;
+} __packed;
+
+static const char *const qlc_83xx_idc_states[] = {
+ "Unknown",
+ "Cold",
+ "Init",
+ "Ready",
+ "Need Reset",
+ "Need Quiesce",
+ "Failed",
+ "Quiesce"
+};
+
+static int
+qlcnic_83xx_idc_check_driver_presence_reg(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ if ((val & 0xFFFF))
+ return 1;
+ else
+ return 0;
+}
+
+static void qlcnic_83xx_idc_log_state_history(struct qlcnic_adapter *adapter)
+{
+ u32 cur, prev;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+
+ dev_info(&adapter->pdev->dev,
+ "current state = %s, prev state = %s\n",
+ adapter->ahw->idc.name[cur],
+ adapter->ahw->idc.name[prev]);
+}
+
+static int qlcnic_83xx_idc_update_audit_reg(struct qlcnic_adapter *adapter,
+ u8 mode, int lock)
+{
+ u32 val;
+ int seconds;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+ val |= (adapter->portnum & 0xf);
+ val |= mode << 7;
+ if (mode)
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ else
+ seconds = jiffies / HZ;
+
+ val |= seconds << 8;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT, val);
+ adapter->ahw->idc.sec_counter = jiffies / HZ;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_minor_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION);
+ val = val & ~(0x3 << (adapter->portnum * 2));
+ val = val | (QLC_83XX_IDC_MINOR_VERSION << (adapter->portnum * 2));
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MIN_VERSION, val);
+}
+
+static int qlcnic_83xx_idc_update_major_version(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ val = val & ~0xFF;
+ val = val | QLC_83XX_IDC_MAJOR_VERSION;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_update_drv_presence_reg(struct qlcnic_adapter *adapter,
+ int status, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+
+ if (status)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ qlcnic_83xx_idc_update_minor_version(adapter);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_major_version(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u8 version;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_MAJ_VERSION);
+ version = val & 0xFF;
+
+ if (version != QLC_83XX_IDC_MAJOR_VERSION) {
+ dev_info(&adapter->pdev->dev,
+ "%s:mismatch. version 0x%x, expected version 0x%x\n",
+ __func__, version, QLC_83XX_IDC_MAJOR_VERSION);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_clear_registers(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, 0);
+ /* Clear graceful reset bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_update_drv_ack_reg(struct qlcnic_adapter *adapter,
+ int flag, int lock)
+{
+ u32 val;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ if (flag)
+ val = val | (1 << adapter->portnum);
+ else
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_ACK, val);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_timeout(struct qlcnic_adapter *adapter,
+ int time_limit)
+{
+ u64 seconds;
+
+ seconds = jiffies / HZ - adapter->ahw->idc.sec_counter;
+ if (seconds <= time_limit)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+/**
+ * qlcnic_83xx_idc_check_reset_ack_reg
+ *
+ * @adapter: adapter structure
+ *
+ * Check ACK wait limit and clear the functions which failed to ACK
+ *
+ * Return 0 if all functions have acknowledged the reset request.
+ **/
+static int qlcnic_83xx_idc_check_reset_ack_reg(struct qlcnic_adapter *adapter)
+{
+ int timeout;
+ u32 ack, presence, val;
+
+ timeout = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ ack = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_ACK);
+ presence = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ dev_info(&adapter->pdev->dev,
+ "%s: ack = 0x%x, presence = 0x%x\n", __func__, ack, presence);
+ if (!((ack & presence) == presence)) {
+ if (qlcnic_83xx_idc_check_timeout(adapter, timeout)) {
+ /* Clear functions which failed to ACK */
+ dev_info(&adapter->pdev->dev,
+ "%s: ACK wait exceeds time limit\n", __func__);
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(ack ^ presence);
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ dev_info(&adapter->pdev->dev,
+ "%s: updated drv presence reg = 0x%x\n",
+ __func__, val);
+ qlcnic_83xx_unlock_driver(adapter);
+ return 0;
+
+ } else {
+ return 1;
+ }
+ } else {
+ dev_info(&adapter->pdev->dev,
+ "%s: Reset ACK received from all functions\n",
+ __func__);
+ return 0;
+ }
+}
+
+/**
+ * qlcnic_83xx_idc_tx_soft_reset
+ *
+ * @adapter: adapter structure
+ *
+ * Handle context deletion and recreation request from transmit routine
+ *
+ * Returns -EBUSY or Success (0)
+ *
+ **/
+static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ qlcnic_down(adapter, netdev);
+ qlcnic_up(adapter, netdev);
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ netdev_info(adapter->netdev, "%s: soft reset complete.\n", __func__);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_detach_driver
+ *
+ * @adapter: adapter structure
+ * Detach the net interface, stop TX, and clean up resources before the HW reset.
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
+{
+ int i;
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+
+ /* Disable mailbox interrupt */
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ qlcnic_down(adapter, netdev);
+ for (i = 0; i < adapter->ahw->num_msix; i++) {
+ adapter->ahw->intr_tbl[i].id = i;
+ adapter->ahw->intr_tbl[i].enabled = 0;
+ adapter->ahw->intr_tbl[i].src = 0;
+ }
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_reset(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_attach_driver
+ *
+ * @adapter: adapter structure
+ *
+ * Re-attach and re-enable net interface
+ * Returns: None
+ *
+ **/
+static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+}
+
+static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_FAILED);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ qlcnic_83xx_idc_log_state_history(adapter);
+ dev_info(&adapter->pdev->dev, "Device will enter failed state\n");
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_init_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_INIT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_need_quiesce(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_QUISCENT);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_83xx_idc_enter_need_reset_state(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_NEED_RESET);
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_enter_ready_state(struct qlcnic_adapter *adapter,
+ int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE, QLC_83XX_IDC_DEV_READY);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_find_reset_owner_id
+ *
+ * @adapter: adapter structure
+ *
+ * NIC gets precedence over ISCSI and ISCSI has precedence over FCOE.
+ * Within the same class, the function with the lowest PCI ID assumes ownership.
+ *
+ * Returns: reset owner id or failure indication (-EIO)
+ *
+ **/
+static int qlcnic_83xx_idc_find_reset_owner_id(struct qlcnic_adapter *adapter)
+{
+ u32 reg, reg1, reg2, i, j, owner, class;
+
+ reg1 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_1);
+ reg2 = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_PARTITION_INFO_2);
+ owner = QLCNIC_TYPE_NIC;
+ i = 0;
+ j = 0;
+ reg = reg1;
+
+ do {
+ class = (((reg & (0xF << j * 4)) >> j * 4) & 0x3);
+ if (class == owner)
+ break;
+ if (i == (QLC_83XX_IDC_MAX_FUNC_PER_PARTITION_INFO - 1)) {
+ reg = reg2;
+ j = 0;
+ } else {
+ j++;
+ }
+
+ if (i == (QLC_83XX_IDC_MAX_CNA_FUNCTIONS - 1)) {
+ if (owner == QLCNIC_TYPE_NIC)
+ owner = QLCNIC_TYPE_ISCSI;
+ else if (owner == QLCNIC_TYPE_ISCSI)
+ owner = QLCNIC_TYPE_FCOE;
+ else if (owner == QLCNIC_TYPE_FCOE)
+ return -EIO;
+ reg = reg1;
+ j = 0;
+ i = 0;
+ }
+ } while (i++ < QLC_83XX_IDC_MAX_CNA_FUNCTIONS);
+
+ return i;
+}
+
+static int qlcnic_83xx_idc_restart_hw(struct qlcnic_adapter *adapter, int lock)
+{
+ int ret = 0;
+
+ ret = qlcnic_83xx_restart_hw(adapter);
+
+ if (ret) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, lock);
+ } else {
+ qlcnic_83xx_idc_clear_registers(adapter, lock);
+ ret = qlcnic_83xx_idc_enter_ready_state(adapter, lock);
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_check_fan_failure(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "peg halt status1=0x%x\n", status);
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ qlcnic_83xx_initialize_nic(adapter, 1);
+
+ err = qlcnic_sriov_pf_reinit(adapter);
+ if (err)
+ return err;
+
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->nic_ops->init_driver(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ qlcnic_dcb_get_info(adapter->dcb);
+ qlcnic_83xx_idc_attach_driver(adapter);
+
+ return 0;
+}
+
+static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ ahw->idc.quiesce_req = 0;
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->idc.err_code = 0;
+ ahw->idc.collect_dump = 0;
+ ahw->reset_context = 0;
+ adapter->tx_timeo_cnt = 0;
+ ahw->idc.delay_reset = 0;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Perform ready state initialization; this routine will get invoked only
+ * once from the READY state.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+ /* Re-attach the device if required */
+ if ((ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (ahw->idc.prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_vnic_pf_entry
+ *
+ * @adapter: adapter structure
+ *
+ * Ensure the vNIC mode privileged function starts only after vNIC mode is
+ * enabled by the management function.
+ * If vNIC mode is ready, start initialization.
+ *
+ * Returns: -EIO or 0
+ *
+ **/
+int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ /* Privileged function waits till mgmt function enables VNIC mode */
+ state = QLCRDX(adapter->ahw, QLC_83XX_VNIC_STATE);
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ if (!ahw->idc.vnic_wait_limit--) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ dev_info(&adapter->pdev->dev, "vNIC mode disabled\n");
+ return -EIO;
+
+ } else {
+ /* Perform one time initialization from ready state */
+ if (ahw->idc.vnic_state != QLCNIC_DEV_NPAR_OPER) {
+ qlcnic_83xx_idc_update_idc_params(adapter);
+
+ /* If the previous state is UNKNOWN, the device will already
+ * have been attached properly by the init routine.
+ */
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_UNKNOWN) {
+ if (qlcnic_83xx_idc_reattach_driver(adapter))
+ return -EIO;
+ }
+ adapter->ahw->idc.vnic_state = QLCNIC_DEV_NPAR_OPER;
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled\n");
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ adapter->ahw->idc.err_code = -EIO;
+ dev_err(&adapter->pdev->dev,
+ "%s: Device in unknown state\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_cold_state_handler
+ *
+ * @adapter: adapter structure
+ *
+ * If HW is up and running device will enter READY state.
+ * If a firmware image needs to be loaded from the host, the device is
+ * forced to start with the file firmware image.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_cold_state_handler(struct qlcnic_adapter *adapter)
+{
+ qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 0);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 1, 0);
+
+ if (qlcnic_load_fw_file) {
+ qlcnic_83xx_idc_restart_hw(adapter, 0);
+ } else {
+ if (qlcnic_83xx_check_hw_status(adapter)) {
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ return -EIO;
+ } else {
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+ }
+ }
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_idc_init_state
+ *
+ * @adapter: adapter structure
+ *
+ * Reset owner will restart the device from this state.
+ * Device will enter the failed state if it remains
+ * in this state for more than the DEV_INIT time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
+{
+ int timeout, ret = 0;
+ u32 owner;
+
+ timeout = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ if (adapter->ahw->idc.prev_state == QLC_83XX_IDC_DEV_NEED_RESET) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (adapter->ahw->pci_func == owner)
+ ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
+ } else {
+ ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_ready_state
+ *
+ * @adapter: adapter structure
+ *
+ * Perform IDC protocol specified actions after monitoring device state and
+ * events.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ int ret = 0;
+ u32 val;
+
+ /* Perform NIC configuration based ready state entry actions */
+ if (ahw->idc.state_entry(adapter))
+ return -EIO;
+
+ if (qlcnic_check_temp(adapter)) {
+ if (ahw->temp == QLCNIC_TEMP_PANIC) {
+ qlcnic_83xx_idc_check_fan_failure(adapter);
+ dev_err(&adapter->pdev->dev,
+ "Error: device temperature %d above limits\n",
+ adapter->ahw->temp);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return -EIO;
+ }
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ ret = qlcnic_83xx_check_heartbeat(adapter);
+ if (ret) {
+ adapter->flags |= QLCNIC_FW_HANG;
+ if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ } else {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ }
+ return -EIO;
+ }
+
+ if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
+ /* Move to need reset state and prepare for reset */
+ qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
+ return ret;
+ }
+
+ /* Check for soft reset request */
+ if (ahw->reset_context &&
+ !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
+ adapter->ahw->reset_context = 0;
+ qlcnic_83xx_idc_tx_soft_reset(adapter);
+ return ret;
+ }
+
+ /* Move to need quiesce state if requested */
+ if (adapter->ahw->idc.quiesce_req) {
+ qlcnic_83xx_idc_enter_need_quiesce(adapter, 1);
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * qlcnic_83xx_idc_need_reset_state
+ *
+ * @adapter: adapter structure
+ *
+ * Device will remain in this state until:
+ * Reset request ACKs are received from all the functions, or
+ * the wait time exceeds the maximum time limit.
+ *
+ * Returns: Error code or Success(0)
+ *
+ **/
+static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ int ret = 0;
+
+ if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ if (adapter->ahw->nic_mode == QLCNIC_VNIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ if (qlcnic_check_diag_status(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Wait for diag completion\n", __func__);
+ adapter->ahw->idc.delay_reset = 1;
+ return 0;
+ } else {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ }
+ }
+
+ if (qlcnic_check_diag_status(adapter)) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Wait for diag completion\n", __func__);
+ return -1;
+ } else {
+ if (adapter->ahw->idc.delay_reset) {
+ qlcnic_83xx_idc_update_drv_ack_reg(adapter, 1, 1);
+ qlcnic_83xx_idc_detach_driver(adapter);
+ adapter->ahw->idc.delay_reset = 0;
+ }
+
+ /* Check for ACK from other functions */
+ ret = qlcnic_83xx_idc_check_reset_ack_reg(adapter);
+ if (ret) {
+ dev_info(&adapter->pdev->dev,
+ "%s: Waiting for reset ACK\n", __func__);
+ return -1;
+ }
+ }
+
+ /* Transit to INIT state and restart the HW */
+ qlcnic_83xx_idc_enter_init_state(adapter, 1);
+
+ return ret;
+}
+
+static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 val, owner;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_dump_fw(adapter);
+ }
+ }
+
+ netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+ __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ ahw->idc.err_code = -EIO;
+
+ return;
+}
+
+static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
+{
+ dev_info(&adapter->pdev->dev, "%s: TBD\n", __func__);
+ return 0;
+}
+
+static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
+ u32 state)
+{
+ u32 cur, prev, next;
+
+ cur = adapter->ahw->idc.curr_state;
+ prev = adapter->ahw->idc.prev_state;
+ next = state;
+
+ if ((next < QLC_83XX_IDC_DEV_COLD) ||
+ (next > QLC_83XX_IDC_DEV_QUISCENT)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: curr %d, prev %d, next state %d is invalid\n",
+ __func__, cur, prev, state);
+ return 1;
+ }
+
+ if ((cur == QLC_83XX_IDC_DEV_UNKNOWN) &&
+ (prev == QLC_83XX_IDC_DEV_UNKNOWN)) {
+ if ((next != QLC_83XX_IDC_DEV_COLD) &&
+ (next != QLC_83XX_IDC_DEV_READY)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ if (next == QLC_83XX_IDC_DEV_INIT) {
+ if ((prev != QLC_83XX_IDC_DEV_INIT) &&
+ (prev != QLC_83XX_IDC_DEV_COLD) &&
+ (prev != QLC_83XX_IDC_DEV_NEED_RESET)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: failed, cur %d prev %d next %d\n",
+ __func__, cur, prev, next);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
+#define QLC_83XX_MATCH_ENCAP_ID BIT_2
+#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
+#define QLC_83XX_VXLAN_UDP_DPORT(PORT) ((PORT & 0xffff) << 16)
+
+#define QLCNIC_ENABLE_INGRESS_ENCAP_PARSING 1
+#define QLCNIC_DISABLE_INGRESS_ENCAP_PARSING 0
+
+int qlcnic_set_vxlan_port(struct qlcnic_adapter *adapter, u16 port)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_INIT_NIC_FUNC);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = QLC_83XX_MULTI_TENANCY_INFO;
+ cmd.req.arg[2] = QLC_83XX_ENCAP_TYPE_VXLAN |
+ QLC_83XX_SET_VXLAN_UDP_DPORT |
+ QLC_83XX_VXLAN_UDP_DPORT(port);
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to set VXLAN port %d in adapter\n",
+ port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+
+int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter, u16 port)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_INGRESS_ENCAP);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = port ? QLCNIC_ENABLE_INGRESS_ENCAP_PARSING :
+ QLCNIC_DISABLE_INGRESS_ENCAP_PARSING;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Failed to %s VXLAN parsing for port %d\n",
+ port ? "enable" : "disable", port);
+ else
+ netdev_info(adapter->netdev,
+ "%s VXLAN parsing for port %d\n",
+ port ? "Enabled" : "Disabled", port);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return ret;
+}
+
+static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
+/**
+ * qlcnic_83xx_idc_poll_dev_state
+ *
+ * @work: kernel work queue structure used to schedule the function
+ *
+ * Poll device state periodically and perform state specific
+ * actions defined by Inter Driver Communication (IDC) protocol.
+ *
+ * Returns: None
+ *
+ **/
+void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ u32 state;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_idc_log_state_history(adapter);
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ } else {
+ adapter->ahw->idc.curr_state = state;
+ }
+
+ switch (adapter->ahw->idc.curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ qlcnic_83xx_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ qlcnic_83xx_idc_need_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ qlcnic_83xx_idc_need_quiesce_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ qlcnic_83xx_idc_failed_state(adapter);
+ return;
+ case QLC_83XX_IDC_DEV_INIT:
+ qlcnic_83xx_idc_init_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ qlcnic_83xx_idc_quiesce_state(adapter);
+ break;
+ default:
+ qlcnic_83xx_idc_unknown_state(adapter);
+ return;
+ }
+ adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
+ qlcnic_83xx_periodic_tasks(adapter);
+
+ /* Re-schedule the function */
+ if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
+ adapter->ahw->idc.delay);
+}
+
+static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
+{
+ u32 idc_params, val;
+
+ if (qlcnic_83xx_flash_read32(adapter, QLC_83XX_IDC_FLASH_PARAM_ADDR,
+ (u8 *)&idc_params, 1)) {
+ dev_info(&adapter->pdev->dev,
+ "%s:failed to get IDC params from flash\n", __func__);
+ adapter->dev_init_timeo = QLC_83XX_IDC_INIT_TIMEOUT_SECS;
+ adapter->reset_ack_timeo = QLC_83XX_IDC_RESET_TIMEOUT_SECS;
+ } else {
+ adapter->dev_init_timeo = idc_params & 0xFFFF;
+ adapter->reset_ack_timeo = ((idc_params >> 16) & 0xFFFF);
+ }
+
+ adapter->ahw->idc.curr_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.prev_state = QLC_83XX_IDC_DEV_UNKNOWN;
+ adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ adapter->ahw->idc.err_code = 0;
+ adapter->ahw->idc.collect_dump = 0;
+ adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+
+ /* Check if reset recovery is disabled */
+ if (!qlcnic_auto_fw_reset) {
+ /* Propagate do not reset request to other functions */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+}
+
+static int
+qlcnic_83xx_idc_first_to_load_function_handler(struct qlcnic_adapter *adapter)
+{
+ u32 state, val;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EIO;
+
+ /* Clear driver lock register */
+ QLCWRX(adapter->ahw, QLC_83XX_RECOVER_DRV_LOCK, 0);
+ if (qlcnic_83xx_idc_update_major_version(adapter, 0)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (qlcnic_83xx_idc_check_state_validity(adapter, state)) {
+ qlcnic_83xx_unlock_driver(adapter);
+ return -EIO;
+ }
+
+ if (state != QLC_83XX_IDC_DEV_COLD && qlcnic_load_fw_file) {
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DEV_STATE,
+ QLC_83XX_IDC_DEV_COLD);
+ state = QLC_83XX_IDC_DEV_COLD;
+ }
+
+ adapter->ahw->idc.curr_state = state;
+ /* First to load function should cold boot the device */
+ if (state == QLC_83XX_IDC_DEV_COLD)
+ qlcnic_83xx_idc_cold_state_handler(adapter);
+
+ /* Check if reset recovery is enabled */
+ if (qlcnic_auto_fw_reset) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val & ~QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_idc_init(struct qlcnic_adapter *adapter)
+{
+ int ret = -EIO;
+
+ qlcnic_83xx_setup_idc_parameters(adapter);
+
+ if (qlcnic_83xx_get_reset_instruction_template(adapter))
+ return ret;
+
+ if (!qlcnic_83xx_idc_check_driver_presence_reg(adapter)) {
+ if (qlcnic_83xx_idc_first_to_load_function_handler(adapter))
+ return -EIO;
+ } else {
+ if (qlcnic_83xx_idc_check_major_version(adapter))
+ return -EIO;
+ }
+
+ qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
+
+ return 0;
+}
+
+void qlcnic_83xx_idc_exit(struct qlcnic_adapter *adapter)
+{
+ int id;
+ u32 val;
+
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+
+ if (id == adapter->portnum) {
+ dev_err(&adapter->pdev->dev,
+ "%s: wait for lock recovery.. %d\n", __func__, id);
+ msleep(20);
+ id = QLCRDX(adapter->ahw, QLC_83XX_DRV_LOCK_ID);
+ id = id & 0xFF;
+ }
+
+ /* Clear driver presence bit */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ val = val & ~(1 << adapter->portnum);
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE, val);
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
+{
+ u32 val;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed, please retry\n", __func__);
+ return;
+ }
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+ }
+
+ if (key == QLCNIC_FORCE_FW_RESET) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val = val | QLC_83XX_IDC_GRACEFULL_RESET;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+ } else if (key == QLCNIC_FORCE_FW_DUMP_KEY) {
+ adapter->ahw->idc.collect_dump = 1;
+ }
+
+ qlcnic_83xx_unlock_driver(adapter);
+ return;
+}
+
+static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
+{
+ u8 *p_cache;
+ u32 src, size;
+ u64 dest;
+ int ret = -EIO;
+
+ src = QLC_83XX_BOOTLOADER_FLASH_ADDR;
+ dest = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_ADDR);
+ size = QLCRDX(adapter->ahw, QLCNIC_BOOTLOADER_SIZE);
+
+ /* alignment check */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ p_cache = vzalloc(size);
+ if (p_cache == NULL)
+ return -ENOMEM;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
+ size / sizeof(u32));
+ if (ret) {
+ vfree(p_cache);
+ return ret;
+ }
+ /* 16 byte write to MS memory */
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
+ if (ret) {
+ vfree(p_cache);
+ return ret;
+ }
+ vfree(p_cache);
+
+ return ret;
+}
+
+static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ const struct firmware *fw = fw_info->fw;
+ u32 dest, *p_cache, *temp;
+ __le32 *temp_le;
+ u8 data[16];
+ size_t size;
+ int i, ret;
+ u64 addr;
+
+ temp = vzalloc(fw->size);
+ if (!temp) {
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ return -ENOMEM;
+ }
+
+ temp_le = (__le32 *)fw->data;
+
+ /* The FW image in the file is little endian; swap the data to nullify
+ * the effect of the writel() operation on big endian platforms.
+ */
+ for (i = 0; i < fw->size / sizeof(u32); i++)
+ temp[i] = __le32_to_cpu(temp_le[i]);
+
+ dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
+ size = (fw->size & ~0xF);
+ p_cache = temp;
+ addr = (u64)dest;
+
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "MS memory write failed\n");
+ goto exit;
+ }
+
+ /* alignment check */
+ if (fw->size & 0xF) {
+ addr = dest + size;
+ for (i = 0; i < (fw->size & 0xF); i++)
+ data[i] = ((u8 *)temp)[size + i];
+ for (; i < 16; i++)
+ data[i] = 0;
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "MS memory write failed\n");
+ goto exit;
+ }
+ }
+
+exit:
+ release_firmware(fw);
+ fw_info->fw = NULL;
+ vfree(temp);
+
+ return ret;
+}
+
+static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
+{
+ int i, j;
+ u32 val = 0, val1 = 0, reg = 0;
+ int err = 0;
+
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Pause Threshold Regs[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_THRESHOLD;
+ }
+ for (i = 0; i < 8; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB TC Max Cell Registers[4..1]:");
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+ }
+ for (i = 0; i < 4; i++) {
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0) {
+ dev_info(&adapter->pdev->dev,
+ "Port 0 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT0_TC_STATS;
+ } else if (j == 1) {
+ dev_info(&adapter->pdev->dev,
+ "Port 1 RxB Rx TC Stats[TC7..TC0]:");
+ reg = QLC_83XX_PORT1_TC_STATS;
+ }
+ for (i = 7; i >= 0; i--) {
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
+ val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
+ QLCWR32(adapter, reg, (val | (i << 29)));
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
+ }
+ dev_info(&adapter->pdev->dev, "\n");
+ }
+
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev,
+ "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
+ val, val1);
+}
+
+static void qlcnic_83xx_disable_pause_frames(struct qlcnic_adapter *adapter)
+{
+ u32 reg = 0, i, j;
+
+ if (qlcnic_83xx_lock_driver(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "%s:failed to acquire driver lock\n", __func__);
+ return;
+ }
+
+ qlcnic_83xx_dump_pause_control_regs(adapter);
+ QLCWR32(adapter, QLC_83XX_SRE_SHIM_REG, 0x0);
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_THRESHOLD;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_THRESHOLD;
+
+ for (i = 0; i < 8; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x0);
+ }
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ reg = QLC_83XX_PORT0_TC_MC_REG;
+ else if (j == 1)
+ reg = QLC_83XX_PORT1_TC_MC_REG;
+
+ for (i = 0; i < 4; i++)
+ QLCWR32(adapter, reg + (i * 0x4), 0x03FF03FF);
+ }
+
+ QLCWR32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, 0);
+ QLCWR32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, 0);
+ dev_info(&adapter->pdev->dev,
+ "Disabled pause frames successfully on all ports\n");
+ qlcnic_83xx_unlock_driver(adapter);
+}
+
+static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
+{
+ QLCWR32(adapter, QLC_83XX_RESET_REG, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT0, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT1, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT2, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_PORT3, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_SRESHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_EPGSHIM, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_ETHERPCS, 0);
+ QLCWR32(adapter, QLC_83XX_RESET_CONTROL, 1);
+}
+
+static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
+{
+ u32 heartbeat, peg_status;
+ int retries, ret = -EIO, err = 0;
+
+ retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+ p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != p_dev->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ if (ret) {
+ dev_err(&p_dev->pdev->dev, "firmware hang detected\n");
+ qlcnic_83xx_take_eport_out_of_reset(p_dev);
+ qlcnic_83xx_disable_pause_frames(p_dev);
+ peg_status = QLC_SHARED_REG_RD32(p_dev,
+ QLCNIC_PEG_HALT_STATUS1);
+ dev_info(&p_dev->pdev->dev, "Dumping HW/FW registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n", peg_status,
+ QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
+
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&p_dev->pdev->dev,
+ "Device is being reset err code 0x00006700.\n");
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_check_cmd_peg_status(struct qlcnic_adapter *p_dev)
+{
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+ u32 val;
+
+ do {
+ val = QLC_SHARED_REG_RD32(p_dev, QLCNIC_CMDPEG_STATE);
+ if (val == QLC_83XX_CMDPEG_COMPLETE)
+ return 0;
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ dev_err(&p_dev->pdev->dev, "%s: failed, state = 0x%x\n", __func__, val);
+ return -EIO;
+}
+
+static int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
+{
+ int err;
+
+ err = qlcnic_83xx_check_cmd_peg_status(p_dev);
+ if (err)
+ return err;
+
+ err = qlcnic_83xx_check_heartbeat(p_dev);
+ if (err)
+ return err;
+
+ return err;
+}
+
+static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
+ int duration, u32 mask, u32 status)
+{
+ int timeout_error, err = 0;
+ u32 value;
+ u8 retries;
+
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
+ retries = duration / 10;
+
+ do {
+ if ((value & mask) != status) {
+ timeout_error = 1;
+ msleep(duration / 10);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+ if (timeout_error) {
+ p_dev->ahw->reset.seq_error++;
+ dev_err(&p_dev->pdev->dev,
+ "%s: Timeout Err, entry_num = %d\n",
+ __func__, p_dev->ahw->reset.seq_index);
+ dev_err(&p_dev->pdev->dev,
+ "0x%08x 0x%08x 0x%08x\n",
+ value, mask, status);
+ }
+
+ return timeout_error;
+}
+
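+/*
+ * Validate the reset template: sum it as 16-bit words and fold the carries
+ * back into the low 16 bits (ones'-complement style) before checking the
+ * result.
+ */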
+static int qlcnic_83xx_reset_template_checksum(struct qlcnic_adapter *p_dev)
+{
+ u32 sum = 0;
+ u16 *buff = (u16 *)p_dev->ahw->reset.buff;
+ int count = p_dev->ahw->reset.hdr->size / sizeof(u16);
+
+ while (count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ if (~sum) {
+ return 0;
+ } else {
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+ return -1;
+ }
+}
+
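+/*
+ * Read the HW restart template from flash, verify its checksum and cache
+ * the locations of the stop, start and init command sequences.
+ */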
+static int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
+{
+ struct qlcnic_hardware_context *ahw = p_dev->ahw;
+ u32 addr, count, prev_ver, curr_ver;
+ u8 *p_buff;
+
+ if (ahw->reset.buff != NULL) {
+ prev_ver = p_dev->fw_version;
+ curr_ver = qlcnic_83xx_get_fw_version(p_dev);
+ if (curr_ver > prev_ver)
+ kfree(ahw->reset.buff);
+ else
+ return 0;
+ }
+
+ ahw->reset.seq_error = 0;
+ ahw->reset.buff = kzalloc(QLC_83XX_RESTART_TEMPLATE_SIZE, GFP_KERNEL);
+ if (ahw->reset.buff == NULL)
+ return -ENOMEM;
+
+ p_buff = p_dev->ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR;
+ count = sizeof(struct qlc_83xx_reset_hdr) / sizeof(u32);
+
+ /* Copy template header from flash */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+ ahw->reset.hdr = (struct qlc_83xx_reset_hdr *)ahw->reset.buff;
+ addr = QLC_83XX_RESET_TEMPLATE_ADDR + ahw->reset.hdr->hdr_size;
+ p_buff = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ count = (ahw->reset.hdr->size - ahw->reset.hdr->hdr_size) / sizeof(u32);
+
+ /* Copy rest of the template */
+ if (qlcnic_83xx_flash_read32(p_dev, addr, p_buff, count)) {
+ dev_err(&p_dev->pdev->dev, "%s: flash read failed\n", __func__);
+ return -EIO;
+ }
+
+ if (qlcnic_83xx_reset_template_checksum(p_dev))
+ return -EIO;
+ /* Get Stop, Start and Init command offsets */
+ ahw->reset.init_offset = ahw->reset.buff + ahw->reset.hdr->init_offset;
+ ahw->reset.start_offset = ahw->reset.buff +
+ ahw->reset.hdr->start_offset;
+ ahw->reset.stop_offset = ahw->reset.buff + ahw->reset.hdr->hdr_size;
+ return 0;
+}
+
+/* Read Write HW register command */
+static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr)
+{
+ int err = 0;
+ u32 value;
+
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Read Modify Write HW register command */
+static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
+ u32 raddr, u32 waddr,
+ struct qlc_83xx_rmw *p_rmw_hdr)
+{
+ int err = 0;
+ u32 value;
+
+ if (p_rmw_hdr->index_a) {
+ value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
+ } else {
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ }
+
+ value &= p_rmw_hdr->mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
+}
+
+/* Write HW register command */
+static void qlcnic_83xx_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Read and Write instruction */
+static void qlcnic_83xx_read_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+
+ entry = (struct qlc_83xx_entry *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_read_write_crb_reg(p_dev, entry->arg1,
+ entry->arg2);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+/* Poll HW register command */
+static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_poll *poll;
+ int i, err = 0;
+ unsigned long arg1, arg2;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, entry++)
+ qlcnic_83xx_poll_reg(p_dev, entry->arg1,
+ delay, poll->mask,
+ poll->status);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ arg1 = entry->arg1;
+ arg2 = entry->arg2;
+ if (delay) {
+ if (qlcnic_83xx_poll_reg(p_dev,
+ arg1, delay,
+ poll->mask,
+ poll->status)){
+ QLCRD32(p_dev, arg1, &err);
+ if (err == -EIO)
+ return;
+ QLCRD32(p_dev, arg2, &err);
+ if (err == -EIO)
+ return;
+ }
+ }
+ }
+ }
+}
+
+/* Poll and write HW register command */
+static void qlcnic_83xx_poll_write_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ long delay;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->dr_addr,
+ entry->dr_value);
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay)
+ qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status);
+ }
+}
+
+/* Read Modify Write register command */
+static void qlcnic_83xx_read_modify_write(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ int i;
+ struct qlc_83xx_entry *entry;
+ struct qlc_83xx_rmw *rmw_hdr;
+
+ rmw_hdr = (struct qlc_83xx_rmw *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_entry *)((char *)rmw_hdr +
+ sizeof(struct qlc_83xx_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_rmw_crb_reg(p_dev, entry->arg1,
+ entry->arg2, rmw_hdr);
+ if (p_hdr->delay)
+ udelay((u32)(p_hdr->delay));
+ }
+}
+
+static void qlcnic_83xx_pause(struct qlc_83xx_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((u32)((long)p_hdr->delay));
+}
+
+/* Read and poll register command */
+static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
+ struct qlc_83xx_entry_hdr *p_hdr)
+{
+ long delay;
+ int index, i, j, err;
+ struct qlc_83xx_quad_entry *entry;
+ struct qlc_83xx_poll *poll;
+ unsigned long addr;
+
+ poll = (struct qlc_83xx_poll *)((char *)p_hdr +
+ sizeof(struct qlc_83xx_entry_hdr));
+
+ entry = (struct qlc_83xx_quad_entry *)((char *)poll +
+ sizeof(struct qlc_83xx_poll));
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, entry++) {
+ qlcnic_83xx_wrt_reg_indirect(p_dev, entry->ar_addr,
+ entry->ar_value);
+ if (delay) {
+ if (!qlcnic_83xx_poll_reg(p_dev, entry->ar_addr, delay,
+ poll->mask, poll->status)){
+ index = p_dev->ahw->reset.array_index;
+ addr = entry->dr_addr;
+ j = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return;
+
+ p_dev->ahw->reset.array[index++] = j;
+
+ if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
+ p_dev->ahw->reset.array_index = 1;
+ }
+ }
+ }
+}
+
+static inline void qlcnic_83xx_seq_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_end = 1;
+}
+
+static void qlcnic_83xx_template_end(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.template_end = 1;
+ if (p_dev->ahw->reset.seq_error == 0)
+ dev_err(&p_dev->pdev->dev,
+ "HW restart process completed successfully.\n");
+ else
+ dev_err(&p_dev->pdev->dev,
+ "HW restart completed with timeout errors.\n");
+}
+
+/**
+ * qlcnic_83xx_exec_template_cmd
+ *
+ * @p_dev: adapter structure
+ * @p_buff: pointer to the instruction template
+ *
+ * The template provides instructions to stop, restart and initialize the
+ * firmware. These instructions are abstracted as a series of read, write and
+ * poll operations on hardware registers. Register information and operation
+ * specifics are not exposed to the driver. The driver reads the template from
+ * flash and executes the instructions located at pre-defined offsets.
+ *
+ * Returns: None
+ */
+static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
+ char *p_buff)
+{
+ int index, entries;
+ struct qlc_83xx_entry_hdr *p_hdr;
+ char *entry = p_buff;
+
+ p_dev->ahw->reset.seq_end = 0;
+ p_dev->ahw->reset.template_end = 0;
+ entries = p_dev->ahw->reset.hdr->entries;
+ index = p_dev->ahw->reset.seq_index;
+
+ for (; (!p_dev->ahw->reset.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qlc_83xx_entry_hdr *)entry;
+
+ switch (p_hdr->cmd) {
+ case QLC_83XX_OPCODE_NOP:
+ break;
+ case QLC_83XX_OPCODE_WRITE_LIST:
+ qlcnic_83xx_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_WRITE_LIST:
+ qlcnic_83xx_read_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_LIST:
+ qlcnic_83xx_poll_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_POLL_WRITE_LIST:
+ qlcnic_83xx_poll_write_list(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_READ_MODIFY_WRITE:
+ qlcnic_83xx_read_modify_write(p_dev, p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_PAUSE:
+ qlcnic_83xx_pause(p_hdr);
+ break;
+ case QLC_83XX_OPCODE_SEQ_END:
+ qlcnic_83xx_seq_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_TMPL_END:
+ qlcnic_83xx_template_end(p_dev);
+ break;
+ case QLC_83XX_OPCODE_POLL_READ_LIST:
+ qlcnic_83xx_poll_read_list(p_dev, p_hdr);
+ break;
+ default:
+ dev_err(&p_dev->pdev->dev,
+ "%s: Unknown opcode 0x%04x in template %d\n",
+ __func__, p_hdr->cmd, index);
+ break;
+ }
+ entry += p_hdr->size;
+ cond_resched();
+ }
+ p_dev->ahw->reset.seq_index = index;
+}
+
+static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+{
+ p_dev->ahw->reset.seq_index = 0;
+
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.stop_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_start_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.start_offset);
+ if (p_dev->ahw->reset.template_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
+{
+ qlcnic_83xx_exec_template_cmd(p_dev, p_dev->ahw->reset.init_offset);
+ if (p_dev->ahw->reset.seq_end != 1)
+ dev_err(&p_dev->pdev->dev, "%s: failed\n", __func__);
+}
+
+/* POST FW related definitions */
+#define QLC_83XX_POST_SIGNATURE_REG 0x41602014
+#define QLC_83XX_POST_MODE_REG 0x41602018
+#define QLC_83XX_POST_FAST_MODE 0
+#define QLC_83XX_POST_MEDIUM_MODE 1
+#define QLC_83XX_POST_SLOW_MODE 2
+
+/* POST Timeout values in milliseconds */
+#define QLC_83XX_POST_FAST_MODE_TIMEOUT 690
+#define QLC_83XX_POST_MED_MODE_TIMEOUT 2930
+#define QLC_83XX_POST_SLOW_MODE_TIMEOUT 7500
+
+/* POST result values */
+#define QLC_83XX_POST_PASS 0xfffffff0
+#define QLC_83XX_POST_ASIC_STRESS_TEST_FAIL 0xffffffff
+#define QLC_83XX_POST_DDR_TEST_FAIL 0xfffffffe
+#define QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL 0xfffffffc
+#define QLC_83XX_POST_FLASH_TEST_FAIL 0xfffffff8
+
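+/*
+ * Run the firmware power-on self test: load the POST image, program the
+ * requested POST mode, boot from the host image and poll the signature
+ * register until it reports a result or the mode-specific timeout expires.
+ */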
+static int qlcnic_83xx_run_post(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ struct device *dev = &adapter->pdev->dev;
+ int timeout, count, ret = 0;
+ u32 signature;
+
+ /* Set timeout values with an extra 2 seconds of buffer */
+ switch (adapter->ahw->post_mode) {
+ case QLC_83XX_POST_FAST_MODE:
+ timeout = QLC_83XX_POST_FAST_MODE_TIMEOUT + 2000;
+ break;
+ case QLC_83XX_POST_MEDIUM_MODE:
+ timeout = QLC_83XX_POST_MED_MODE_TIMEOUT + 2000;
+ break;
+ case QLC_83XX_POST_SLOW_MODE:
+ timeout = QLC_83XX_POST_SLOW_MODE_TIMEOUT + 2000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ strncpy(fw_info->fw_file_name, QLC_83XX_POST_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+
+ ret = request_firmware(&fw_info->fw, fw_info->fw_file_name, dev);
+ if (ret) {
+ dev_err(dev, "POST firmware can not be loaded, skipping POST\n");
+ return 0;
+ }
+
+ ret = qlcnic_83xx_copy_fw_file(adapter);
+ if (ret)
+ return ret;
+
+ /* Clear the POST signature register */
+ qlcnic_ind_wr(adapter, QLC_83XX_POST_SIGNATURE_REG, 0);
+
+ /* Set POST mode */
+ qlcnic_ind_wr(adapter, QLC_83XX_POST_MODE_REG,
+ adapter->ahw->post_mode);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+
+ qlcnic_83xx_start_hw(adapter);
+
+ count = 0;
+ do {
+ msleep(100);
+ count += 100;
+
+ signature = qlcnic_ind_rd(adapter, QLC_83XX_POST_SIGNATURE_REG);
+ if (signature == QLC_83XX_POST_PASS)
+ break;
+ } while (timeout > count);
+
+ if (timeout <= count) {
+ dev_err(dev, "POST timed out, signature = 0x%08x\n", signature);
+ return -EIO;
+ }
+
+ switch (signature) {
+ case QLC_83XX_POST_PASS:
+ dev_info(dev, "POST passed, Signature = 0x%08x\n", signature);
+ break;
+ case QLC_83XX_POST_ASIC_STRESS_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : ASIC STRESS TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_DDR_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : DDT TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_ASIC_MEMORY_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : ASIC MEMORY TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ case QLC_83XX_POST_FLASH_TEST_FAIL:
+ dev_err(dev, "POST failed, Test case : FLASH TEST, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ default:
+ dev_err(dev, "POST failed, Test case : INVALID, Signature = 0x%08x\n",
+ signature);
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
+static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ int err = -EIO;
+
+ if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+ &(adapter->pdev->dev))) {
+ dev_err(&adapter->pdev->dev,
+ "No file FW image, loading flash FW image.\n");
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ } else {
+ if (qlcnic_83xx_copy_fw_file(adapter))
+ return err;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FILE);
+ }
+
+ return 0;
+}
+
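+/*
+ * Full firmware restart: stop the hardware, optionally collect a firmware
+ * dump, run the init template, copy the bootloader, run POST if requested
+ * and boot either the host file image or the flash image before verifying
+ * the hardware status.
+ */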
+static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int err = -EIO;
+
+ qlcnic_83xx_stop_hw(adapter);
+
+ /* Collect FW register dump if required */
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
+ qlcnic_dump_fw(adapter);
+
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return err;
+ }
+
+ qlcnic_83xx_init_hw(adapter);
+
+ if (qlcnic_83xx_copy_bootloader(adapter))
+ return err;
+
+ /* Check if POST needs to be run */
+ if (adapter->ahw->run_post) {
+ err = qlcnic_83xx_run_post(adapter);
+ if (err)
+ return err;
+
+ /* No need to run POST in next reset sequence */
+ adapter->ahw->run_post = false;
+
+ /* Again reset the adapter to load regular firmware */
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_83xx_init_hw(adapter);
+
+ err = qlcnic_83xx_copy_bootloader(adapter);
+ if (err)
+ return err;
+ }
+
+ /* Boot either flash image or firmware image from host file system */
+ if (qlcnic_load_fw_file == 1) {
+ err = qlcnic_83xx_load_fw_image_from_host(adapter);
+ if (err)
+ return err;
+ } else {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
+ QLC_83XX_BOOT_FROM_FLASH);
+ }
+
+ qlcnic_83xx_start_hw(adapter);
+ if (qlcnic_83xx_check_hw_status(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+static int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_info nic_info;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_tx_ques = nic_info.max_tx_ques;
+ ahw->max_rx_ques = nic_info.max_rx_ques;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->max_mac_filters = nic_info.max_mac_filters;
+ ahw->max_mtu = nic_info.max_mtu;
+
+ /* The eSwitch capability indicates vNIC mode.
+ * vNIC and SR-IOV are mutually exclusive operational modes,
+ * and SR-IOV has precedence over vNIC mode.
+ * If SR-IOV capability is detected, the SR-IOV physical
+ * function is initialized in default mode, while SR-IOV
+ * virtual function initialization follows a different code
+ * path and opmode.
+ */
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ return QLC_83XX_DEFAULT_OPMODE;
+
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ return QLCNIC_VNIC_MODE;
+
+ return QLC_83XX_DEFAULT_OPMODE;
+}
+
+int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 max_sds_rings, max_tx_rings;
+ int ret;
+
+ ret = qlcnic_83xx_get_nic_configuration(adapter);
+ if (ret == -EIO)
+ return -EIO;
+
+ if (ret == QLCNIC_VNIC_MODE) {
+ ahw->nic_mode = QLCNIC_VNIC_MODE;
+
+ if (qlcnic_83xx_config_vnic_opmode(adapter))
+ return -EIO;
+
+ max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_VNIC_TX_RINGS;
+ } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+ ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ } else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
+ return -EIO;
+ }
+
+ adapter->max_sds_rings = min(ahw->max_rx_ques, max_sds_rings);
+ adapter->max_tx_rings = min(ahw->max_tx_ques, max_tx_rings);
+
+ return 0;
+}
+
+static void qlcnic_83xx_config_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+#define IS_QLC_83XX_USED(a, b, c) (((1 << (a)->portnum) & (b)) || (((c) >> 6) & 0x1))
+static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 presence_mask, audit_mask;
+ int status;
+
+ presence_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_PRESENCE);
+ audit_mask = QLCRDX(adapter->ahw, QLC_83XX_IDC_DRV_AUDIT);
+
+ if (IS_QLC_83XX_USED(adapter, presence_mask, audit_mask)) {
+ status = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_STOP_NIC_FUNC);
+ if (status)
+ return;
+
+ cmd.req.arg[1] = BIT_31;
+ status = qlcnic_issue_cmd(adapter, &cmd);
+ if (status)
+ dev_err(&adapter->pdev->dev,
+ "Failed to clean up the function resources\n");
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlc_83xx_fw_info *fw_info;
+ int err = 0;
+
+ ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!ahw->fw_info) {
+ err = -ENOMEM;
+ } else {
+ fw_info = ahw->fw_info;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
+ strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static void qlcnic_83xx_init_rings(struct qlcnic_adapter *adapter)
+{
+ u8 rx_cnt = QLCNIC_DEF_SDS_RINGS;
+ u8 tx_cnt = QLCNIC_DEF_TX_RINGS;
+
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (!adapter->ahw->msix_supported) {
+ rx_cnt = QLCNIC_SINGLE_RING;
+ tx_cnt = QLCNIC_SINGLE_RING;
+ }
+
+ /* Compute and set the driver Tx and SDS ring counts */
+ qlcnic_set_tx_ring_count(adapter, tx_cnt);
+ qlcnic_set_sds_ring_count(adapter, rx_cnt);
+}
+
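+/*
+ * One-time 83xx adapter initialization: bring up the mailbox, validate the
+ * hardware and firmware state, set up interrupts and IDC, and configure the
+ * operating mode before starting the device-state poll work.
+ */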
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
+
+ adapter->rx_mac_learn = false;
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ /* Check if POST needs to be run */
+ switch (qlcnic_load_fw_file) {
+ case 2:
+ ahw->post_mode = QLC_83XX_POST_FAST_MODE;
+ ahw->run_post = true;
+ break;
+ case 3:
+ ahw->post_mode = QLC_83XX_POST_MEDIUM_MODE;
+ ahw->run_post = true;
+ break;
+ case 4:
+ ahw->post_mode = QLC_83XX_POST_SLOW_MODE;
+ ahw->run_post = true;
+ break;
+ default:
+ ahw->run_post = false;
+ break;
+ }
+
+ qlcnic_83xx_init_rings(adapter);
+
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ if (qlcnic_83xx_read_flash_descriptor_table(adapter) ||
+ qlcnic_83xx_read_flash_mfg_id(adapter)) {
+ dev_err(&adapter->pdev->dev, "Failed reading flash mfg id\n");
+ err = -ENOTRECOVERABLE;
+ goto detach_mbx;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_get_fw_info(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ qlcnic_83xx_clear_function_resources(adapter);
+
+ err = qlcnic_dcb_enable(adapter->dcb);
+ if (err) {
+ qlcnic_dcb_free(adapter->dcb);
+ goto disable_mbx_intr;
+ }
+
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ qlcnic_dcb_get_info(adapter->dcb);
+
+ /* Configure default, SR-IOV or Virtual NIC mode of operation */
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ /* Perform operating mode specific initialization */
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ /* Periodically monitor device status */
+ qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ ahw->mailbox = NULL;
+exit:
+ return err;
+}
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (ahw->nic_mode == QLCNIC_VNIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_initialize_nic(adapter, 0);
+
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int ret = 0;
+ u32 owner;
+
+ /* Mark the previous IDC state as NEED_RESET so
+ * that state_entry() will perform the reattachment
+ * and bringup the device
+ */
+ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ ret = qlcnic_83xx_restart_hw(adapter);
+ if (ret < 0)
+ return ret;
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ }
+
+ ret = idc->state_entry(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 owner;
+
+ idc->prev_state = QLC_83XX_IDC_DEV_READY;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
+
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
new file mode 100644
index 000000000..711609503
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+static int qlcnic_83xx_enable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_OPER);
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *adapter, int lock)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (lock) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+ }
+
+ QLCWRX(adapter->ahw, QLC_83XX_VNIC_STATE, QLCNIC_DEV_NPAR_NON_OPER);
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+
+ if (lock)
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int ret = -EBUSY;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_83xx_lock_driver(adapter))
+ return ret;
+
+ id = ahw->pci_func;
+ data = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+ data = (data & ~QLC_83XX_SET_FUNC_OPMODE(0x3, id)) |
+ QLC_83XX_SET_FUNC_OPMODE(QLCNIC_MGMT_FUNC, id);
+
+ QLCWRX(adapter->ahw, QLC_83XX_DRV_OP_MODE, data);
+
+ qlcnic_83xx_unlock_driver(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_83xx_config_vnic_buff_descriptors(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (ahw->port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+/**
+ * qlcnic_83xx_init_mgmt_vnic
+ *
+ * @adapter: adapter structure
+ * The management virtual NIC sets the operational mode of the other vNICs
+ * and configures the embedded switch (ESWITCH).
+ * Returns: 0 on success or an error code.
+ */
+static int qlcnic_83xx_init_mgmt_vnic(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_npar_info *npar;
+ int i, err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+
+ if (!(adapter->flags & QLCNIC_ADAPTER_INITIALIZED)) {
+ if (qlcnic_init_pci_info(adapter))
+ return err;
+
+ npar = adapter->npars;
+
+ for (i = 0; i < ahw->total_nic_func; i++, npar++) {
+ dev_info(dev, "id:%d active:%d type:%d port:%d min_bw:%d max_bw:%d mac_addr:%pM\n",
+ npar->pci_func, npar->active, npar->type,
+ npar->phy_port, npar->min_bw, npar->max_bw,
+ npar->mac);
+ }
+
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
+ ahw->max_pci_func, ahw->total_nic_func);
+
+ if (qlcnic_83xx_set_vnic_opmode(adapter))
+ return err;
+
+ if (qlcnic_set_default_offload_settings(adapter))
+ return err;
+ } else {
+ if (qlcnic_reset_npar_config(adapter))
+ return err;
+ }
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ ahw->msix_supported = qlcnic_use_msi_x ? 1 : 0;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ qlcnic_83xx_enable_vnic_mode(adapter, 1);
+
+ dev_info(dev, "HAL Version: %d, Management function\n",
+ ahw->fw_hal_version);
+
+ return 0;
+}
+
+static int qlcnic_83xx_init_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_minidump_template(adapter);
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ return 0;
+}
+
+static int qlcnic_83xx_init_non_privileged_vnic(struct qlcnic_adapter *adapter)
+{
+ int err = -EIO;
+
+ qlcnic_83xx_get_fw_version(adapter);
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return err;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return err;
+
+ qlcnic_83xx_config_vnic_buff_descriptors(adapter);
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ dev_info(&adapter->pdev->dev, "HAL Version: %d, Virtual function\n",
+ adapter->ahw->fw_hal_version);
+
+ return 0;
+}
+
+/**
+ * qlcnic_83xx_config_vnic_opmode
+ *
+ * @adapter: adapter structure
+ * Identify the virtual NIC operational mode and set up the matching
+ * init_driver and IDC state-entry handlers.
+ *
+ * Returns: 0 on success or an error code.
+ */
+int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_nic_template *nic_ops = adapter->nic_ops;
+
+ qlcnic_get_func_no(adapter);
+ op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
+
+ if (op_mode == QLC_83XX_DEFAULT_OPMODE)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ ahw->pci_func);
+ switch (priv_level) {
+ case QLCNIC_NON_PRIV_FUNC:
+ ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
+ break;
+ case QLCNIC_PRIV_FUNC:
+ ahw->op_mode = QLCNIC_PRIV_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
+ break;
+ case QLCNIC_MGMT_FUNC:
+ ahw->op_mode = QLCNIC_MGMT_FUNC;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
+ return -EIO;
+ }
+
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY) {
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ if (adapter->drv_mac_learn)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ adapter->rx_mac_learn = false;
+ }
+
+ ahw->idc.vnic_state = QLCNIC_DEV_NPAR_NON_OPER;
+ ahw->idc.vnic_wait_limit = QLCNIC_DEV_NPAR_OPER_TIMEO;
+
+ return 0;
+}
+
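+/*
+ * Wait up to idc->vnic_wait_limit seconds for the vNIC state register to
+ * report that the NPAR is operational.
+ */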
+int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 state;
+
+ state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+ while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
+ idc->vnic_wait_limit--;
+ msleep(1000);
+ state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
+ }
+
+ if (state != QLCNIC_DEV_NPAR_OPER) {
+ dev_err(&adapter->pdev->dev,
+ "vNIC mode not operational, state check timed out.\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_83xx_set_port_eswitch_status(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ return err;
+
+ if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ *port_id = nic_info.phys_port;
+ else
+ err = -EIO;
+
+ if (!err)
+ adapter->eswitch[*port_id].flags |= QLCNIC_SWITCH_ENABLE;
+
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
new file mode 100644
index 000000000..eb827b86e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,1435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include "qlcnic.h"
+
+static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
+ {QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
+ {QLCNIC_CMD_DESTROY_TX_CTX, 3, 1},
+ {QLCNIC_CMD_INTRPT_TEST, 4, 1},
+ {QLCNIC_CMD_SET_MTU, 4, 1},
+ {QLCNIC_CMD_READ_PHY, 4, 2},
+ {QLCNIC_CMD_WRITE_PHY, 5, 1},
+ {QLCNIC_CMD_READ_HW_REG, 4, 1},
+ {QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
+ {QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
+ {QLCNIC_CMD_READ_MAX_MTU, 4, 2},
+ {QLCNIC_CMD_READ_MAX_LRO, 4, 2},
+ {QLCNIC_CMD_MAC_ADDRESS, 4, 3},
+ {QLCNIC_CMD_GET_PCI_INFO, 4, 1},
+ {QLCNIC_CMD_GET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_SET_NIC_INFO, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
+ {QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
+ {QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
+ {QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
+ {QLCNIC_CMD_GET_MAC_STATS, 4, 1},
+ {QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
+ {QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1},
+ {QLCNIC_CMD_CONFIG_PORT, 4, 1},
+ {QLCNIC_CMD_TEMP_SIZE, 4, 4},
+ {QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
+ {QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
+ {QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
+};
+
+static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
+{
+ return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
+ (0xcafe << 16);
+}
+
+/* Allocate mailbox registers */
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
+ struct qlcnic_adapter *adapter, u32 type)
+{
+ int i, size;
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+
+ mbx_tbl = qlcnic_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_mbx_tbl);
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num,
+ sizeof(u32), GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ mbx->req.arg[0] = type;
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Free up mailbox registers */
+void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
+{
+ kfree(cmd->req.arg);
+ cmd->req.arg = NULL;
+ kfree(cmd->rsp.arg);
+ cmd->rsp.arg = NULL;
+}
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0, err = 0;
+
+ do {
+ /* give at least 1 ms for firmware to respond */
+ mdelay(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
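+/*
+ * Issue a CDRP mailbox command: under the adapter API lock, write the
+ * signature and arguments to the CRB registers, kick the command and poll
+ * for the firmware response code.
+ */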
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ int i, err = 0;
+ u32 rsp;
+ u32 signature;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ const char *fmt;
+
+ signature = qlcnic_get_cmd_signature(ahw);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter)) {
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ return cmd->rsp.arg[0];
+ }
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ for (i = 1; i < cmd->req.num; i++)
+ QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
+ QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
+ cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
+ switch (cmd->rsp.arg[0]) {
+ case QLCNIC_RCODE_INVALID_ARGS:
+ fmt = "CDRP invalid args: [%d]\n";
+ break;
+ case QLCNIC_RCODE_NOT_SUPPORTED:
+ case QLCNIC_RCODE_NOT_IMPL:
+ fmt = "CDRP command not supported: [%d]\n";
+ break;
+ case QLCNIC_RCODE_NOT_PERMITTED:
+ fmt = "CDRP requested action not permitted: [%d]\n";
+ break;
+ case QLCNIC_RCODE_INVALID:
+ fmt = "CDRP invalid or unknown cmd received: [%d]\n";
+ break;
+ case QLCNIC_RCODE_TIMEOUT:
+ fmt = "CDRP command timeout: [%d]\n";
+ break;
+ default:
+ fmt = "CDRP command failed: [%d]\n";
+ break;
+ }
+ dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
+ } else if (rsp == QLCNIC_CDRP_RSP_OK)
+ cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
+
+ for (i = 1; i < cmd->rsp.num; i++)
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+ return cmd->rsp.arg[0];
+}
+
+int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 arg1, arg2, arg3;
+ char drv_string[12];
+ int err = 0;
+
+ memset(drv_string, 0, sizeof(drv_string));
+ snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
+ _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
+ _QLCNIC_LINUX_SUBVERSION);
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
+ if (err)
+ return err;
+
+ memcpy(&arg1, drv_string, sizeof(u32));
+ memcpy(&arg2, drv_string + 4, sizeof(u32));
+ memcpy(&arg3, drv_string + 8, sizeof(u32));
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ cmd.req.arg[3] = arg3;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Failed to set driver version in firmware\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
+ return err;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = recv_ctx->context_id;
+ cmd.req.arg[2] = mtu;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
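+/*
+ * Create the firmware RX context: build the host request describing the
+ * RDS and SDS rings in DMA-coherent memory, issue CREATE_RX_CTX and record
+ * the producer, consumer and interrupt-mask CRB offsets returned by firmware.
+ */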
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ u8 i, nrds_rings, nsds_rings;
+ struct qlcnic_cmd_args cmd;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
+ int err;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->drv_sds_rings;
+
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = addr;
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
+ | QLCNIC_CAP0_VALIDOFF);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = 0;
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+ rds_ring->producer = 0;
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+ sds_ring->consumer = 0;
+ memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
+ if (err)
+ goto out_free_rsp;
+
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create rx ctx in firmware%d\n", err);
+ goto out_free_rsp;
+ }
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
+ return err;
+}
+
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
+ if (err)
+ return;
+
+ cmd.req.arg[1] = recv_ctx->context_id;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+
+ recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
+ qlcnic_free_mbx_args(&cmd);
+}
+
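+/*
+ * Create a firmware TX context for one ring: describe the command descriptor
+ * ring and its interrupt binding in a DMA-coherent request, issue
+ * CREATE_TX_CTX and record the returned producer CRB offset and context id.
+ */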
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int ring)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ struct qlcnic_cmd_args cmd;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
+
+ /* reset host resources */
+ tx_ring->producer = 0;
+ tx_ring->sw_consumer = 0;
+ *(tx_ring->hw_consumer) = 0;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ prq = rq_addr;
+ prsp = rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->drv_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
+
+ prq->interrupt_ctl = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
+ if (err)
+ goto out_free_rsp;
+
+ cmd.req.arg[1] = MSD(phys_addr);
+ cmd.req.arg[2] = LSD(phys_addr);
+ cmd.req.arg[3] = rq_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
+ tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->drv_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
+ } else {
+ netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
+ err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
+out_free_rq:
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
+ if (ret)
+ return;
+
+ cmd.req.arg[1] = tx_ring->ctx_id;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ qlcnic_free_mbx_args(&cmd);
+}
+
+int
+qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = config;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
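+/*
+ * Allocate DMA-coherent descriptor rings and hardware consumer indices for
+ * all TX, RDS and SDS rings; partial allocations are freed on failure.
+ */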
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err, ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ __le32 *ptr;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr,
+ GFP_KERNEL);
+ if (ptr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->hw_consumer = ptr;
+ /* cmd desc ring */
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr,
+ GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = addr;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = addr;
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr, GFP_KERNEL);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = addr;
+ }
+
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
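+/*
+ * Create the RX and TX contexts in firmware, performing an FLR and setting
+ * up the MSI-X interrupt mappings first when required; the interrupt
+ * configuration is rolled back if context creation fails.
+ */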
+int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
+{
+ int i, err, ring;
+
+ if (dev->flags & QLCNIC_NEED_FLR) {
+ err = pci_reset_function(dev->pdev);
+ if (err) {
+ dev_err(&dev->pdev->dev,
+ "Adapter reset failed (%d). Please reboot\n",
+ err);
+ return err;
+ }
+ dev->flags &= ~QLCNIC_NEED_FLR;
+ }
+
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ err = qlcnic_83xx_config_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+ }
+
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+
+ err = qlcnic_fw_cmd_create_rx_ctx(dev);
+ if (err)
+ goto err_out;
+
+ for (ring = 0; ring < dev->drv_tx_rings; ring++) {
+ err = qlcnic_fw_cmd_create_tx_ctx(dev,
+ &dev->tx_ring[ring],
+ ring);
+ if (err) {
+ qlcnic_fw_cmd_del_rx_ctx(dev);
+ if (ring == 0)
+ goto err_out;
+
+ for (i = 0; i < ring; i++)
+ qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
+
+ goto err_out;
+ }
+ }
+
+ set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
+ return 0;
+
+err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
+ if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
+ if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(dev, 0);
+ }
+
+ return err;
+}
+
+void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
+{
+ int ring;
+
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_del_rx_ctx(adapter);
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+ qlcnic_fw_cmd_del_tx_ctx(adapter,
+ &adapter->tx_ring[ring]);
+
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
+ if (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
+ qlcnic_83xx_config_intrpt(adapter, 0);
+ }
+ /* Allow dma queues to drain after context reset */
+ mdelay(20);
+ }
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->hw_consumer != NULL) {
+ dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head,
+ tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ dma_free_coherent(&adapter->pdev->dev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ if (err)
+ return err;
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
+
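+/*
+ * Read a function's MAC address from firmware: response word 2 holds the
+ * first two bytes of the address and word 1 holds the remaining four.
+ */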
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
+{
+ int err, i;
+ struct qlcnic_cmd_args cmd;
+ u32 mac_low, mac_high;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = function | BIT_8;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ mac_low = cmd.rsp.arg[1];
+ mac_high = cmd.rsp.arg[2];
+
+ for (i = 0; i < 2; i++)
+ mac[i] = (u8) (mac_high >> ((1 - i) * 8));
+ for (i = 2; i < 6; i++)
+ mac[i] = (u8) (mac_low >> ((5 - i) * 8));
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get mac address%d\n", err);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+/* Get info of a NIC partition */
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u8 func_id)
+{
+ int err;
+ dma_addr_t nic_dma_t;
+ const struct qlcnic_info_le *nic_info;
+ void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
+ size_t nic_size = sizeof(struct qlcnic_info_le);
+
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
+ if (!nic_info_addr)
+ return -ENOMEM;
+
+ nic_info = nic_info_addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = (func_id << 16 | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get nic info%d\n", err);
+ err = -EIO;
+ } else {
+ npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
+ npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
+ npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
+ npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
+ npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
+ npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
+ npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
+ npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
+ npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
+ npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
+
+ return err;
+}
+
+/* Configure a NIC partition */
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *nic)
+{
+ int err = -EIO;
+ dma_addr_t nic_dma_t;
+ void *nic_info_addr;
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_info_le *nic_info;
+ size_t nic_size = sizeof(struct qlcnic_info_le);
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return err;
+
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
+ if (!nic_info_addr)
+ return -ENOMEM;
+
+ nic_info = nic_info_addr;
+
+ nic_info->pci_func = cpu_to_le16(nic->pci_func);
+ nic_info->op_mode = cpu_to_le16(nic->op_mode);
+ nic_info->phys_port = cpu_to_le16(nic->phys_port);
+ nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
+ nic_info->capabilities = cpu_to_le32(nic->capabilities);
+ nic_info->max_mac_filters = nic->max_mac_filters;
+ nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
+ nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
+ nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
+ nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(nic_dma_t);
+ cmd.req.arg[2] = LSD(nic_dma_t);
+ cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set nic info%d\n", err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
+
+ return err;
+}
+
+/* Get PCI Info of a partition */
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_pci_info *pci_info)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ size_t npar_size = sizeof(struct qlcnic_pci_info_le);
+ size_t pci_size = npar_size * ahw->max_vnic_func;
+ u16 nic = 0, fcoe = 0, iscsi = 0;
+ struct qlcnic_pci_info_le *npar;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t pci_info_dma_t;
+ void *pci_info_addr;
+ int err = 0, i;
+
+ pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
+ if (!pci_info_addr)
+ return -ENOMEM;
+
+ npar = pci_info_addr;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = MSD(pci_info_dma_t);
+ cmd.req.arg[2] = LSD(pci_info_dma_t);
+ cmd.req.arg[3] = pci_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ ahw->total_nic_func = 0;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
+ pci_info->id = le16_to_cpu(npar->id);
+ pci_info->active = le16_to_cpu(npar->active);
+ if (!pci_info->active)
+ continue;
+ pci_info->type = le16_to_cpu(npar->type);
+ err = qlcnic_get_pci_func_type(adapter, pci_info->type,
+ &nic, &fcoe, &iscsi);
+ pci_info->default_port =
+ le16_to_cpu(npar->default_port);
+ pci_info->tx_min_bw =
+ le16_to_cpu(npar->tx_min_bw);
+ pci_info->tx_max_bw =
+ le16_to_cpu(npar->tx_max_bw);
+ memcpy(pci_info->mac, npar->mac, ETH_ALEN);
+ }
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PCI Info%d\n", err);
+ err = -EIO;
+ }
+
+ ahw->total_nic_func = nic;
+ ahw->total_pci_func = nic + fcoe + iscsi;
+ if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
+ __func__, ahw->total_nic_func, ahw->total_pci_func);
+ err = -EIO;
+ }
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
+ pci_info_dma_t);
+
+ return err;
+}
+
+/* Configure eSwitch for port mirroring */
+int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
+ u8 enable_mirroring, u8 pci_func)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+ u32 arg1;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
+ return err;
+ }
+
+ arg1 = id | (enable_mirroring ? BIT_4 : 0);
+ arg1 |= pci_func << 8;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_SET_PORTMIRRORING);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
+ pci_func, id);
+ else
+ dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
+ pci_func, id);
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
+ struct qlcnic_esw_stats_le *stats;
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ u32 arg1;
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+
+ if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
+ (func != adapter->ahw->pci_func)) {
+ dev_err(&adapter->pdev->dev,
+ "Not privilege to query stats for func=%d", func);
+ return -EIO;
+ }
+
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr)
+ return -ENOMEM;
+
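+ /* Request word, mirroring the shifts below: bits 0-7 function,
+  * bits 8-11 stats version, bits 12-14 query type (port),
+  * bit 15 rx/tx counter select, bits 16-31 response buffer size.
+  */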
+ arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
+ arg1 |= rx_tx << 15 | stats_size << 16;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_STATS);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (!err) {
+ stats = stats_addr;
+ esw_stats->context_id = le16_to_cpu(stats->context_id);
+ esw_stats->version = le16_to_cpu(stats->version);
+ esw_stats->size = le16_to_cpu(stats->size);
+ esw_stats->multicast_frames =
+ le64_to_cpu(stats->multicast_frames);
+ esw_stats->broadcast_frames =
+ le64_to_cpu(stats->broadcast_frames);
+ esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
+ esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
+ esw_stats->local_frames = le64_to_cpu(stats->local_frames);
+ esw_stats->errors = le64_to_cpu(stats->errors);
+ esw_stats->numbytes = le64_to_cpu(stats->numbytes);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+ stats_dma_t);
+
+ return err;
+}
+
+/* This routine will retrieve the MAC statistics from firmware */
+int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
+ struct qlcnic_mac_statistics *mac_stats)
+{
+ struct qlcnic_mac_statistics_le *stats;
+ struct qlcnic_cmd_args cmd;
+ size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
+ dma_addr_t stats_dma_t;
+ void *stats_addr;
+ int err;
+
+ if (mac_stats == NULL)
+ return -ENOMEM;
+
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
+ if (!stats_addr)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
+ if (err)
+ goto out_free_dma;
+
+ cmd.req.arg[1] = stats_size << 16;
+ cmd.req.arg[2] = MSD(stats_dma_t);
+ cmd.req.arg[3] = LSD(stats_dma_t);
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (!err) {
+ stats = stats_addr;
+ mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
+ mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
+ mac_stats->mac_tx_mcast_pkts =
+ le64_to_cpu(stats->mac_tx_mcast_pkts);
+ mac_stats->mac_tx_bcast_pkts =
+ le64_to_cpu(stats->mac_tx_bcast_pkts);
+ mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
+ mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
+ mac_stats->mac_rx_mcast_pkts =
+ le64_to_cpu(stats->mac_rx_mcast_pkts);
+ mac_stats->mac_rx_length_error =
+ le64_to_cpu(stats->mac_rx_length_error);
+ mac_stats->mac_rx_length_small =
+ le64_to_cpu(stats->mac_rx_length_small);
+ mac_stats->mac_rx_length_large =
+ le64_to_cpu(stats->mac_rx_length_large);
+ mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
+ mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
+ mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "%s: Get mac stats failed, err=%d.\n", __func__, err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_dma:
+ dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
+ stats_dma_t);
+
+ return err;
+}
+
+int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
+ const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
+{
+ struct __qlcnic_esw_statistics port_stats;
+ u8 i;
+ int ret = -EIO;
+
+ if (esw_stats == NULL)
+ return -ENOMEM;
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+ if (adapter->npars == NULL)
+ return -EIO;
+
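+ /* Only the first sizeof(u64) bytes are cleared here; every counter
+  * below is explicitly initialized to QLCNIC_STATS_NOT_AVAIL anyway.
+  */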
+ memset(esw_stats, 0, sizeof(u64));
+ esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
+ esw_stats->context_id = eswitch;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].phy_port != eswitch)
+ continue;
+
+ memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
+ if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
+ rx_tx, &port_stats))
+ continue;
+
+ esw_stats->size = port_stats.size;
+ esw_stats->version = port_stats.version;
+ QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
+ port_stats.unicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
+ port_stats.multicast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
+ port_stats.broadcast_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
+ port_stats.dropped_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->errors,
+ port_stats.errors);
+ QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
+ port_stats.local_frames);
+ QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
+ port_stats.numbytes);
+ ret = 0;
+ }
+ return ret;
+}
+
+int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
+ const u8 port, const u8 rx_tx)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u32 arg1;
+
+ if (ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return -EIO;
+
+ if (func_esw == QLCNIC_STATS_PORT) {
+ if (port >= ahw->max_vnic_func)
+ goto err_ret;
+ } else if (func_esw == QLCNIC_STATS_ESWITCH) {
+ if (port >= QLCNIC_NIU_MAX_XG_PORTS)
+ goto err_ret;
+ } else {
+ goto err_ret;
+ }
+
+ if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
+ goto err_ret;
+
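+ /* Same GET_ESWITCH_STATS mailbox as the query path above; BIT_14
+  * appears to mark this as a clear request (no DMA buffer is supplied).
+  */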
+ arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
+ arg1 |= BIT_14 | rx_tx << 15;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_STATS);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+
+err_ret:
+ dev_err(&adapter->pdev->dev,
+ "Invalid args func_esw %d port %d rx_ctx %d\n",
+ func_esw, port, rx_tx);
+ return -EIO;
+}
+
+static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ u32 *arg1, u32 *arg2)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ u8 pci_func = *arg1 >> 8;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = *arg1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ *arg1 = cmd.rsp.arg[1];
+ *arg2 = cmd.rsp.arg[2];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (err == QLCNIC_RCODE_SUCCESS)
+ dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
+ pci_func);
+ else
+ dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
+ pci_func);
+ return err;
+}
+
+/* Configure eSwitch port
+ * op_mode = 0 for setting default port behavior
+ * op_mode = 1 for setting vlan id
+ * op_mode = 2 for deleting vlan id
+ * op_type = 0 for vlan_id
+ * op_type = 1 for port vlan_id
+ */
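+/* Bit usage, derived from the code below and qlcnic_get_eswitch_port_config():
+ * arg1 - BIT_2 add vlan, BIT_3 delete vlan, BIT_4 discard tagged,
+ * BIT_5 host vlan tag, BIT_6 promiscuous mode, BIT_7 mac override,
+ * bits 8-15 pci function, bits 16-31 vlan id;
+ * arg2 - BIT_0 mac anti-spoof, BIT_1-BIT_3 offload flags.
+ */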
+int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO, index;
+ u32 arg1, arg2 = 0;
+ u8 pci_func;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
+ return err;
+ }
+
+ pci_func = esw_cfg->pci_func;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return err;
+ arg1 = (adapter->npars[index].phy_port & BIT_0);
+ arg1 |= (pci_func << 8);
+
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return err;
+ arg1 &= ~(0x0ff << 8);
+ arg1 |= (pci_func << 8);
+ arg1 &= ~(BIT_2 | BIT_3);
+ switch (esw_cfg->op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ arg1 |= (BIT_4 | BIT_6 | BIT_7);
+ arg2 |= (BIT_0 | BIT_1);
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ arg2 |= (BIT_2 | BIT_3);
+ if (!(esw_cfg->discard_tagged))
+ arg1 &= ~BIT_4;
+ if (!(esw_cfg->promisc_mode))
+ arg1 &= ~BIT_6;
+ if (!(esw_cfg->mac_override))
+ arg1 &= ~BIT_7;
+ if (!(esw_cfg->mac_anti_spoof))
+ arg2 &= ~BIT_0;
+ if (!(esw_cfg->offload_flags & BIT_0))
+ arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
+ if (!(esw_cfg->offload_flags & BIT_1))
+ arg2 &= ~BIT_2;
+ if (!(esw_cfg->offload_flags & BIT_2))
+ arg2 &= ~BIT_3;
+ break;
+ case QLCNIC_ADD_VLAN:
+ arg1 &= ~(0x0ffff << 16);
+ arg1 |= (BIT_2 | BIT_5);
+ arg1 |= (esw_cfg->vlan_id << 16);
+ break;
+ case QLCNIC_DEL_VLAN:
+ arg1 |= (BIT_3 | BIT_5);
+ arg1 &= ~(0x0ffff << 16);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
+ return err;
+ }
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_CONFIGURE_ESWITCH);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = arg1;
+ cmd.req.arg[2] = arg2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ qlcnic_free_mbx_args(&cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS)
+ dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
+ pci_func);
+ else
+ dev_info(dev, "Configured eSwitch for vNIC function %d\n",
+ pci_func);
+
+ return err;
+}
+
+int
+qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ u32 arg1, arg2;
+ int index;
+ u8 phy_port;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
+ index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
+ if (index < 0)
+ return -EIO;
+ phy_port = adapter->npars[index].phy_port;
+ } else {
+ phy_port = adapter->ahw->physical_port;
+ }
+ arg1 = phy_port;
+ arg1 |= (esw_cfg->pci_func << 8);
+ if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
+ return -EIO;
+
+ esw_cfg->discard_tagged = !!(arg1 & BIT_4);
+ esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
+ esw_cfg->promisc_mode = !!(arg1 & BIT_6);
+ esw_cfg->mac_override = !!(arg1 & BIT_7);
+ esw_cfg->vlan_id = LSW(arg1 >> 16);
+ esw_cfg->mac_anti_spoof = (arg2 & 0x1);
+ esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 000000000..4d638f60f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM 3
+#define QLC_DCB_LOCAL_IDX 0
+#define QLC_DCB_OPER_IDX 1
+#define QLC_DCB_PEER_IDX 2
+
+#define QLC_DCB_GET_MAP(V) (1 << V)
+
+#define QLC_DCB_FW_VER 0x2
+#define QLC_DCB_MAX_TC 0x8
+#define QLC_DCB_MAX_APP 0x8
+#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID 0x3
+#define QLC_DCB_OPER_PARAM_FWID 0x1
+#define QLC_DCB_PEER_PARAM_FWID 0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *);
+static void __qlcnic_dcb_free(struct qlcnic_dcb *);
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *);
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *, void *);
+
+struct qlcnic_dcb_capability {
+ bool tsa_capability;
+ bool ets_capability;
+ u8 max_num_tc;
+ u8 max_ets_tc;
+ u8 max_pfc_tc;
+ u8 dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+ u32 hdr_prio_pfc_map[2];
+ u32 prio_pg_map[2];
+ u32 pg_bw_map[2];
+ u32 pg_tsa_map[2];
+ u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+ /* 1st local, 2nd operational, 3rd remote */
+ struct qlcnic_dcb_param type[3];
+ u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+ __le32 hdr_prio_pfc_map[2];
+ __le32 prio_pg_map[2];
+ __le32 pg_bw_map[2];
+ __le32 pg_tsa_map[2];
+ __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+ QLC_SELECTOR_DEF = 0x0,
+ QLC_SELECTOR_ETHER,
+ QLC_SELECTOR_TCP,
+ QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+ QLC_PRIO_NONE = 0,
+ QLC_PRIO_GROUP,
+ QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+ QLC_PFC_DISABLED = 0,
+ QLC_PFC_FULL,
+ QLC_PFC_TX,
+ QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+ bool valid;
+ enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+ bool valid;
+ u8 total_bw_percent; /* of Link/ port BW */
+ u8 prio_count;
+ u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+ bool valid;
+ struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+ enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
+ u8 link_percent; /* % of link bandwidth */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 up_tc_map;
+ u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+ bool valid;
+ enum qlcnic_dcb_selector selector;
+ u16 protocol;
+ u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+ struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+ struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+ struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+ bool tc_param_valid;
+ bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+ /* 0 - local, 1 - operational, 2 - remote */
+ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+ struct qlcnic_dcb_capability capability;
+ u32 version;
+};
+
+static const struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
+ .aen_handler = qlcnic_83xx_dcb_aen_handler,
+};
+
+static const struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
+ .aen_handler = qlcnic_82xx_dcb_aen_handler,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_NUMAPP(val);
+ else
+ return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_PFC_VALID(val);
+ else
+ return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_TSA_VALID(val);
+ else
+ return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+ else
+ return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
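+/* Return the lowest priority set in @up_tc_map, or QLC_DCB_MAX_TC if none */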
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+ int j;
+
+ for (j = 0; j < QLC_DCB_MAX_TC; j++)
+ if (up_tc_map & QLC_DCB_GET_MAP(j))
+ break;
+
+ return j;
+}
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ dcb->adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+ else if (qlcnic_83xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+ if (!dcb)
+ return -ENOMEM;
+
+ adapter->dcb = dcb;
+ dcb->adapter = adapter;
+ qlcnic_set_dcb_ops(adapter);
+ dcb->state = 0;
+
+ return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_adapter *adapter;
+
+ if (!dcb)
+ return;
+
+ adapter = dcb->adapter;
+
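+ /* Wait for an in-flight AEN refresh (which sets QLCNIC_DCB_AEN_MODE)
+  * to drain before cancelling the work and destroying the workqueue.
+  */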
+ while (test_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&dcb->aen_work);
+
+ if (dcb->wq) {
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+ }
+
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+ kfree(dcb->param);
+ dcb->param = NULL;
+ kfree(dcb);
+ adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ qlcnic_dcb_get_hw_capability(dcb);
+ qlcnic_dcb_get_cee_cfg(dcb);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ int err = 0;
+
+ INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+ dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+ if (!dcb->wq) {
+ dev_err(&dcb->adapter->pdev->dev,
+ "DCB workqueue allocation failed. DCB will be disabled\n");
+ return -1;
+ }
+
+ dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+ if (!dcb->cfg) {
+ err = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+ if (!dcb->param) {
+ err = -ENOMEM;
+ goto out_free_cfg;
+ }
+
+ return 0;
+out_free_cfg:
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+
+out_free_wq:
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+
+ return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_out;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX capability, err %d\n", err);
+ } else {
+ mbx_out = cmd.rsp.arg[1];
+ if (buf)
+ memcpy(buf, &mbx_out, sizeof(u32));
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int __qlcnic_dcb_get_capability(struct qlcnic_dcb *dcb, u32 *val)
+{
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+ err = qlcnic_dcb_query_hw_capability(dcb, (char *)val);
+ if (err)
+ return err;
+
+ mbx_out = *val;
+ if (QLC_DCB_TSA_SUPPORT(mbx_out))
+ cap->tsa_capability = true;
+
+ if (QLC_DCB_ETS_SUPPORT(mbx_out))
+ cap->ets_capability = true;
+
+ cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+ cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+ cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+ if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+ cap->max_ets_tc > cap->max_num_tc ||
+ cap->max_pfc_tc > cap->max_num_tc) {
+ dev_err(&dcb->adapter->pdev->dev, "Invalid DCB configuration\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_cfg *cfg = dcb->cfg;
+ struct qlcnic_dcb_capability *cap;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+ if (err)
+ return err;
+
+ cap = &cfg->capability;
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+ char *buf, u8 type)
+{
+ u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t cardrsp_phys_addr;
+ struct qlcnic_dcb_param rsp;
+ struct qlcnic_cmd_args cmd;
+ u64 phys_addr;
+ void *addr;
+ int err, i;
+
+ switch (type) {
+ case QLC_DCB_LOCAL_PARAM_FWID:
+ case QLC_DCB_OPER_PARAM_FWID:
+ case QLC_DCB_PEER_PARAM_FWID:
+ break;
+ default:
+ dev_err(dev, "Invalid parameter type %d\n", type);
+ return -EINVAL;
+ }
+
+ addr = dma_alloc_coherent(dev, size, &cardrsp_phys_addr, GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+
+ prsp_le = addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ goto out_free_rsp;
+
+ phys_addr = cardrsp_phys_addr;
+ cmd.req.arg[1] = size | (type << 16);
+ cmd.req.arg[2] = MSD(phys_addr);
+ cmd.req.arg[3] = LSD(phys_addr);
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+ goto out;
+ }
+
+ memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+ rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+ rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+ rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+ rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+ rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+ rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+ rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+ rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++)
+ rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+ if (buf)
+ memcpy(buf, &rsp, size);
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(dev, size, addr, cardrsp_phys_addr);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_mbx_params *mbx;
+ int err;
+
+ mbx = dcb->param;
+ if (!mbx)
+ return 0;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[0],
+ QLC_DCB_LOCAL_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[1],
+ QLC_DCB_OPER_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)&mbx->type[2],
+ QLC_DCB_PEER_PARAM_FWID);
+ if (err)
+ return err;
+
+ mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+ return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+ struct qlcnic_dcb *dcb;
+
+ dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+
+ qlcnic_dcb_get_cee_cfg(dcb);
+ clear_bit(QLCNIC_DCB_AEN_MODE, &dcb->state);
+}
+
+static void qlcnic_82xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ return;
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ struct qlcnic_dcb_capability *cap = &dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(dcb, &mbx_out);
+ if (err)
+ return err;
+
+ if (mbx_out & BIT_2)
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+ if (mbx_out & BIT_3)
+ cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+ if (cap->dcb_capability)
+ cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_dcb *dcb,
+ char *buf, u8 idx)
+{
+ struct qlcnic_adapter *adapter = dcb->adapter;
+ struct qlcnic_dcb_mbx_params mbx_out;
+ int err, i, j, k, max_app, size;
+ struct qlcnic_dcb_param *each;
+ struct qlcnic_cmd_args cmd;
+ u32 val;
+ char *p;
+
+ size = 0;
+ memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+ memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ return err;
+
+ cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX param, err %d\n", err);
+ goto out;
+ }
+
+ mbx_out.prio_tc_map = cmd.rsp.arg[1];
+ p = memcpy(buf, &mbx_out, sizeof(u32));
+ k = 2;
+ p += sizeof(u32);
+
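+ /* Response layout: arg[1] holds the priority-to-TC map; the three
+  * parameter sets (local, operational, peer) each occupy 16 words,
+  * starting at arg[2], arg[18] and arg[34] respectively.
+  */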
+ for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+ each = &mbx_out.type[j];
+
+ each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+ each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+ each->prio_pg_map[0] = cmd.rsp.arg[k++];
+ each->prio_pg_map[1] = cmd.rsp.arg[k++];
+ each->pg_bw_map[0] = cmd.rsp.arg[k++];
+ each->pg_bw_map[1] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+ val = each->hdr_prio_pfc_map[0];
+
+ max_app = qlcnic_dcb_get_num_app(adapter, val);
+ for (i = 0; i < max_app; i++)
+ each->app[i] = cmd.rsp.arg[i + k];
+
+ size = 16 * sizeof(u32);
+ memcpy(p, &each->hdr_prio_pfc_map[0], size);
+ p += size;
+ if (j == 0)
+ k = 18;
+ else
+ k = 34;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ int err;
+
+ err = qlcnic_dcb_query_cee_param(dcb, (char *)dcb->param, 0);
+ if (err)
+ return err;
+
+ qlcnic_dcb_data_cee_param_map(dcb->adapter);
+
+ return err;
+}
+
+static void qlcnic_83xx_dcb_aen_handler(struct qlcnic_dcb *dcb, void *data)
+{
+ u32 *val = data;
+
+ if (test_and_set_bit(QLCNIC_DCB_AEN_MODE, &dcb->state))
+ return;
+
+ if (*val & BIT_8)
+ set_bit(QLCNIC_DCB_STATE, &dcb->state);
+ else
+ clear_bit(QLCNIC_DCB_STATE, &dcb->state);
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 i, tc, pgid;
+
+ for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+ tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+ tc_cfg = &type->tc_cfg[tc];
+ tc_cfg->valid = true;
+ tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+ if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+ type->pfc_mode_enable) {
+ tc_cfg->prio_cfg[i].valid = true;
+ tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+ }
+
+ if (i < 4)
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+ else
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+ tc_cfg->pgid = pgid;
+
+ tc_cfg->prio_type = QLC_PRIO_LINK;
+ type->pg_cfg[tc_cfg->pgid].prio_count++;
+ }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_pg_cfg *pg_cfg;
+ u8 i, tsa, bw_per;
+
+ for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+ pg_cfg = &type->pg_cfg[i];
+ pg_cfg->valid = true;
+
+ if (i < 4) {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+ } else {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+ }
+
+ pg_cfg->total_bw_percent = bw_per;
+ pg_cfg->tsa_type = tsa;
+ }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_app *app;
+ u8 i, num_app, map, cnt;
+ struct dcb_app new_app;
+
+ num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+ for (i = 0; i < num_app; i++) {
+ app = &type->app[i];
+ app->valid = true;
+
+ /* Only for CEE (-1) */
+ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+ new_app.selector = app->selector;
+ app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+ new_app.protocol = app->protocol;
+ map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+ cnt = qlcnic_dcb_prio_count(map);
+
+ if (cnt >= QLC_DCB_MAX_TC)
+ cnt = 0;
+
+ app->priority = cnt;
+ new_app.priority = cnt;
+
+ if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+ dcb_setapp(adapter->netdev, &new_app);
+ }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+ struct qlcnic_dcb_param *each = &mbx->type[idx];
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+ type->tc_param_valid = false;
+ type->pfc_mode_enable = false;
+ memset(type->tc_cfg, 0,
+ sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+ memset(type->pg_cfg, 0,
+ sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+ if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_pfc_tc)
+ type->pfc_mode_enable = true;
+
+ if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_ets_tc)
+ type->tc_param_valid = true;
+
+ qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+ qlcnic_dcb_fill_cee_pg_params(each, type);
+ qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+ qlcnic_dcb_map_cee_params(adapter, i);
+
+ dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+ memcpy(addr, netdev->perm_addr, netdev->addr_len);
+}
+
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+ u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+ struct qlcnic_dcb_cee *type;
+ u8 i, cnt, pg;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
+ return;
+
+ tc_cfg = &type->tc_cfg[tc];
+ if (!tc_cfg->valid)
+ return;
+
+ *pgid = tc_cfg->pgid;
+ *prio = tc_cfg->prio_type;
+ *up_tc_map = tc_cfg->up_tc_map;
+ pg = *pgid;
+
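+ /* Report the PG bandwidth split evenly among the TCs sharing this PG */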
+ for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+ temp = &type->tc_cfg[i];
+ if (temp->valid && (pg == temp->pgid))
+ cnt++;
+ }
+
+ tc_cfg->bwg_percent = (100 / cnt);
+ *bw_per = tc_cfg->bwg_percent;
+}
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_pg_cfg *pgcfg;
+ struct qlcnic_dcb_cee *type;
+
+ *bw_pct = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
+ return;
+
+ pgcfg = &type->pg_cfg[pgid];
+ if (!pgcfg->valid)
+ return;
+
+ *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 val = QLC_DCB_GET_MAP(prio);
+ struct qlcnic_dcb_cee *type;
+ u8 i;
+
+ *setting = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state) ||
+ !type->pfc_mode_enable)
+ return;
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc_cfg = &type->tc_cfg[i];
+ if (!tc_cfg->valid)
+ continue;
+
+ if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+ *setting = tc_cfg->prio_cfg[prio].pfc_type;
+ }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+ u8 *cap)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 1;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcb->cfg->capability.dcb_capability;
+ break;
+ default:
+ *cap = false;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return -EINVAL;
+
+ switch (attr) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = cfg->capability.max_ets_tc;
+ return 0;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = cfg->capability.max_pfc_tc;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return -EINVAL;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &dcb->state))
+ return 0;
+
+ return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *type;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 1;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *flag = 0;
+
+ switch (fid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (type->tc_param_valid)
+ *flag |= DCB_FEATCFG_ENABLE;
+ else
+ *flag |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (type->pfc_mode_enable) {
+ if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+ *flag |= DCB_FEATCFG_ENABLE;
+ } else {
+ *flag |= DCB_FEATCFG_ERROR;
+ }
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ *flag |= DCB_FEATCFG_ENABLE;
+ break;
+ default:
+ netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ *bw_pct = 0;
+}
+
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
+ memset(info, 0, sizeof(*info));
+ *app_count = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+ if (peer->app[i].valid)
+ (*app_count)++;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ struct qlcnic_dcb_app *app;
+ int i, j;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+ app = &peer->app[i];
+ if (!app->valid)
+ continue;
+
+ table[j].selector = app->selector;
+ table[j].priority = app->priority;
+ table[j++].protocol = app->protocol;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ u8 i, j, k, map;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+ if (!peer->pg_cfg[i].valid)
+ continue;
+
+ pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+ for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+ if (peer->tc_cfg[i].valid &&
+ (peer->tc_cfg[i].pgid == i)) {
+ map = peer->tc_cfg[i].up_tc_map;
+ pg->prio_pg[j++] = map;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_tc_cfg *tc;
+ struct qlcnic_dcb_cee *peer;
+ u8 i, setting, prio;
+
+ pfc->pfc_en = 0;
+
+ if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
+ return 0;
+
+ peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc = &peer->tc_cfg[i];
+ prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+ setting = 0;
+ qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+ if (setting)
+ pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+ }
+
+ pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+ .getstate = qlcnic_dcb_get_state,
+ .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
+ .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
+ .getpfccfg = qlcnic_dcb_get_pfc_cfg,
+ .getcap = qlcnic_dcb_get_capability,
+ .getnumtcs = qlcnic_dcb_get_num_tcs,
+ .getapp = qlcnic_dcb_get_app,
+ .getpfcstate = qlcnic_dcb_get_pfc_state,
+ .getdcbx = qlcnic_dcb_get_dcbx,
+ .getfeatcfg = qlcnic_dcb_get_feat_cfg,
+
+ .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+ .peer_getappinfo = qlcnic_dcb_peer_app_info,
+ .peer_getapptable = qlcnic_dcb_peer_app_table,
+ .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
+ .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 000000000..22afa2be8
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+#define QLCNIC_DCB_STATE 0
+#define QLCNIC_DCB_AEN_MODE 1
+
+#ifdef CONFIG_QLCNIC_DCB
+int qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+struct qlcnic_dcb;
+
+struct qlcnic_dcb_ops {
+ int (*query_hw_capability) (struct qlcnic_dcb *, char *);
+ int (*get_hw_capability) (struct qlcnic_dcb *);
+ int (*query_cee_param) (struct qlcnic_dcb *, char *, u8);
+ void (*init_dcbnl_ops) (struct qlcnic_dcb *);
+ void (*aen_handler) (struct qlcnic_dcb *, void *);
+ int (*get_cee_cfg) (struct qlcnic_dcb *);
+ void (*get_info) (struct qlcnic_dcb *);
+ int (*attach) (struct qlcnic_dcb *);
+ void (*free) (struct qlcnic_dcb *);
+};
+
+struct qlcnic_dcb {
+ struct qlcnic_dcb_mbx_params *param;
+ struct qlcnic_adapter *adapter;
+ struct delayed_work aen_work;
+ struct workqueue_struct *wq;
+ const struct qlcnic_dcb_ops *ops;
+ struct qlcnic_dcb_cfg *cfg;
+ unsigned long state;
+};
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(dcb);
+
+ return -EOPNOTSUPP;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(dcb);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(dcb);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_dcb *dcb, char *buf)
+{
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(dcb, buf);
+
+ return -EOPNOTSUPP;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(dcb);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_dcb *dcb, char *buf, u8 type)
+{
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(dcb, buf, type);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(dcb);
+
+ return -EOPNOTSUPP;
+}
+
+static inline void qlcnic_dcb_aen_handler(struct qlcnic_dcb *dcb, void *msg)
+{
+ if (dcb && dcb->ops->aen_handler)
+ dcb->ops->aen_handler(dcb, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
+{
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(dcb);
+}
+
+static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+{
+ return dcb ? qlcnic_dcb_attach(dcb) : 0;
+}
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 000000000..1ee491f78
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1890 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+static const u32 qlcnic_fw_dump_level[] = {
+ 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+};
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_on", QLC_SIZEOF(stats.xmit_on), QLC_OFF(stats.xmit_on)},
+ {"xmit_off", QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
+ {"xmit_called", QLC_SIZEOF(stats.xmitcalled),
+ QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished", QLC_SIZEOF(stats.xmitfinished),
+ QLC_OFF(stats.xmitfinished)},
+ {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
+ QLC_OFF(stats.tx_dma_map_error)},
+ {"tx_bytes", QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+ {"tx_dropped", QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
+ QLC_OFF(stats.rx_dma_map_error)},
+ {"rx_pkts", QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"rx_bytes", QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"rx_dropped", QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"null rxbuf", QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
+ {"csummed", QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"lro_pkts", QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"lrobytes", QLC_SIZEOF(stats.lrobytes), QLC_OFF(stats.lrobytes)},
+ {"lso_frames", QLC_SIZEOF(stats.lso_frames), QLC_OFF(stats.lso_frames)},
+ {"encap_lso_frames", QLC_SIZEOF(stats.encap_lso_frames),
+ QLC_OFF(stats.encap_lso_frames)},
+ {"encap_tx_csummed", QLC_SIZEOF(stats.encap_tx_csummed),
+ QLC_OFF(stats.encap_tx_csummed)},
+ {"encap_rx_csummed", QLC_SIZEOF(stats.encap_rx_csummed),
+ QLC_OFF(stats.encap_rx_csummed)},
+ {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
+ QLC_OFF(stats.skb_alloc_failure)},
+ {"mac_filter_limit_overrun", QLC_SIZEOF(stats.mac_filter_limit_overrun),
+ QLC_OFF(stats.mac_filter_limit_overrun)},
+ {"spurious intr", QLC_SIZEOF(stats.spurious_intr),
+ QLC_OFF(stats.spurious_intr)},
+ {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr),
+ QLC_OFF(stats.mbx_spurious_intr)},
+};
+
+static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx unicast frames",
+ "tx multicast frames",
+ "tx broadcast frames",
+ "tx dropped frames",
+ "tx errors",
+ "tx local frames",
+ "tx numbytes",
+ "rx unicast frames",
+ "rx multicast frames",
+ "rx broadcast frames",
+ "rx dropped frames",
+ "rx errors",
+ "rx local frames",
+ "rx numbytes",
+};
+
+static const char qlcnic_83xx_tx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_tx_bytes",
+ "ctx_tx_pkts",
+ "ctx_tx_errors",
+ "ctx_tx_dropped_pkts",
+ "ctx_tx_num_buffers",
+};
+
+static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
+ "mac_tx_frames",
+ "mac_tx_bytes",
+ "mac_tx_mcast_pkts",
+ "mac_tx_bcast_pkts",
+ "mac_tx_pause_cnt",
+ "mac_tx_ctrl_pkt",
+ "mac_tx_lt_64b_pkts",
+ "mac_tx_lt_127b_pkts",
+ "mac_tx_lt_255b_pkts",
+ "mac_tx_lt_511b_pkts",
+ "mac_tx_lt_1023b_pkts",
+ "mac_tx_lt_1518b_pkts",
+ "mac_tx_gt_1518b_pkts",
+ "mac_rx_frames",
+ "mac_rx_bytes",
+ "mac_rx_mcast_pkts",
+ "mac_rx_bcast_pkts",
+ "mac_rx_pause_cnt",
+ "mac_rx_ctrl_pkt",
+ "mac_rx_lt_64b_pkts",
+ "mac_rx_lt_127b_pkts",
+ "mac_rx_lt_255b_pkts",
+ "mac_rx_lt_511b_pkts",
+ "mac_rx_lt_1023b_pkts",
+ "mac_rx_lt_1518b_pkts",
+ "mac_rx_gt_1518b_pkts",
+ "mac_rx_length_error",
+ "mac_rx_length_small",
+ "mac_rx_length_large",
+ "mac_rx_jabber",
+ "mac_rx_dropped",
+ "mac_crc_error",
+ "mac_align_error",
+ "eswitch_frames",
+ "eswitch_bytes",
+ "eswitch_multicast_frames",
+ "eswitch_broadcast_frames",
+ "eswitch_unicast_frames",
+ "eswitch_error_free_frames",
+ "eswitch_error_free_bytes",
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_queue_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+ "tx_bytes",
+};
+
+#define QLCNIC_TX_STATS_LEN ARRAY_SIZE(qlcnic_tx_queue_stats_strings)
+
+static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
+ "ctx_rx_bytes",
+ "ctx_rx_pkts",
+ "ctx_lro_pkt_cnt",
+ "ctx_ip_csum_error",
+ "ctx_rx_pkts_wo_ctx",
+ "ctx_rx_pkts_drop_wo_sds_on_card",
+ "ctx_rx_pkts_drop_wo_sds_on_host",
+ "ctx_rx_osized_pkts",
+ "ctx_rx_pkts_dropped_wo_rds",
+ "ctx_rx_unexpected_mcast_pkts",
+ "ctx_invalid_mac_address",
+ "ctx_rx_rds_ring_prim_attempted",
+ "ctx_rx_rds_ring_prim_success",
+ "ctx_num_lro_flows_added",
+ "ctx_num_lro_flows_removed",
+ "ctx_num_lro_flows_active",
+ "ctx_pkts_dropped_unknown",
+};
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Internal_Loopback_offline",
+ "External_Loopback_offline",
+ "EEPROM_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+static inline int qlcnic_82xx_statistics(struct qlcnic_adapter *adapter)
+{
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static inline int qlcnic_83xx_statistics(struct qlcnic_adapter *adapter)
+{
+ return ARRAY_SIZE(qlcnic_gstrings_stats) +
+ ARRAY_SIZE(qlcnic_83xx_tx_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_mac_stats_strings) +
+ ARRAY_SIZE(qlcnic_83xx_rx_stats_strings) +
+ QLCNIC_TX_STATS_LEN * adapter->drv_tx_rings;
+}
+
+static int qlcnic_dev_statistics_len(struct qlcnic_adapter *adapter)
+{
+ int len = -1;
+
+ if (qlcnic_82xx_check(adapter)) {
+ len = qlcnic_82xx_statistics(adapter);
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ len += ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ } else if (qlcnic_83xx_check(adapter)) {
+ len = qlcnic_83xx_statistics(adapter);
+ }
+
+ return len;
+}
+
+#define QLCNIC_TX_INTR_NOT_CONFIGURED 0x78563412
+
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ -1
+};
+
+static const u32 ext_diag_registers[] = {
+ CRB_XG_STATE_P3P,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
+
+#define QLCNIC_MGMT_API_VERSION 3
+#define QLCNIC_ETHTOOL_REGS_VER 4
+
+static inline int qlcnic_get_ring_regs_len(struct qlcnic_adapter *adapter)
+{
+ int ring_regs_cnt = (adapter->drv_tx_rings * 5) +
+ (adapter->max_rds_rings * 2) +
+ (adapter->drv_sds_rings * 3) + 5;
+ return ring_regs_cnt * sizeof(u32);
+}
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 len;
+
+ if (qlcnic_83xx_check(adapter))
+ len = qlcnic_83xx_get_regs_len(adapter);
+ else
+ len = sizeof(ext_diag_registers) + sizeof(diag_registers);
+
+ len += ((QLCNIC_DEV_INFO_SIZE + 2) * sizeof(u32));
+ len += qlcnic_get_ring_regs_len(adapter);
+ return len;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 fw_major, fw_minor, fw_build;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
+}
+
+static int qlcnic_82xx_get_link_ksettings(struct qlcnic_adapter *adapter,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 speed, reg;
+ int check_sfp_module = 0, err = 0;
+ u16 pcifn = ahw->pci_func;
+ u32 supported, advertising;
+
+ /* read which mode */
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->base.speed = adapter->ahw->link_speed;
+ ecmd->base.duplex = adapter->ahw->link_duplex;
+ ecmd->base.autoneg = adapter->ahw->link_autoneg;
+
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ u32 val = 0;
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
+
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ supported = SUPPORTED_1000baseT_Full;
+ advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ supported = SUPPORTED_10000baseT_Full;
+ advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(adapter->netdev) && ahw->has_link_events) {
+ if (ahw->linkup) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn), &err);
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
+
+ ecmd->base.speed = ahw->link_speed;
+ ecmd->base.autoneg = ahw->link_autoneg;
+ ecmd->base.duplex = ahw->link_duplex;
+ goto skip;
+ }
+
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->base.phy_address = adapter->ahw->physical_port;
+
+ switch (adapter->ahw->board_type) {
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ fallthrough;
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_TP;
+ ecmd->base.port = PORT_TP;
+ ecmd->base.autoneg = adapter->ahw->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ supported |= SUPPORTED_MII;
+ advertising |= ADVERTISED_MII;
+ ecmd->base.port = PORT_MII;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ advertising |= ADVERTISED_TP;
+ supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
+ fallthrough;
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_FIBRE;
+ ecmd->base.port = PORT_FIBRE;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->base.port = PORT_FIBRE;
+ check_sfp_module = netif_running(adapter->netdev) &&
+ ahw->has_link_events;
+ } else {
+ ecmd->base.autoneg = AUTONEG_ENABLE;
+ supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->base.port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw->board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->ahw->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->base.port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->base.port = PORT_TP;
+ break;
+ default:
+ ecmd->base.port = PORT_OTHER;
+ }
+ }
+
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static int qlcnic_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return qlcnic_82xx_get_link_ksettings(adapter, ecmd);
+ else if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_get_link_ksettings(adapter, ecmd);
+
+ return -EIO;
+}
+
+static int qlcnic_set_port_config(struct qlcnic_adapter *adapter,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ u32 ret = 0, config = 0;
+
+ /* build the port configuration word */
+ if (ecmd->base.duplex)
+ config |= 0x1;
+
+ if (ecmd->base.autoneg)
+ config |= 0x2;
+
+ switch (ecmd->base.speed) {
+ case SPEED_10:
+ config |= (0 << 8);
+ break;
+ case SPEED_100:
+ config |= (1 << 8);
+ break;
+ case SPEED_1000:
+ config |= (10 << 8);
+ break;
+ default:
+ return -EIO;
+ }
+
+ ret = qlcnic_fw_cmd_set_port(adapter, config);
+
+ if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
+ return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+ return ret;
+}
+
+static int qlcnic_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ u32 ret = 0;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_get_port_type(adapter);
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
+
+ if (qlcnic_83xx_check(adapter))
+ ret = qlcnic_83xx_set_link_ksettings(adapter, ecmd);
+ else
+ ret = qlcnic_set_port_config(adapter, ecmd);
+
+ if (!ret)
+ return ret;
+
+ adapter->ahw->link_speed = ecmd->base.speed;
+ adapter->ahw->link_duplex = ecmd->base.duplex;
+ adapter->ahw->link_autoneg = ecmd->base.autoneg;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
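+/*
+ * Dump the 82xx diagnostic registers (shared registers first, then the
+ * extended CRB registers) into regs_buff just past the device info area
+ * and return the index of the next free slot.
+ */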
+static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
+ u32 *regs_buff)
+{
+ int i, j = 0, err = 0;
+
+ for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
+ regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
+ j = 0;
+ while (ext_diag_registers[j] != -1)
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+ &err);
+ return i;
+}
+
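+/*
+ * ethtool get_regs handler: writes a header word, the management API
+ * version, the chip registers and, when the device is up, a 0xFFEFCDAB
+ * marker followed by the Tx, RDS and SDS ring state.
+ */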
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_tx_ring *tx_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+
+ regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
+ (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
+
+ regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
+ regs_buff[1] = QLCNIC_MGMT_API_VERSION;
+
+ if (adapter->ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ regs_buff[2] = adapter->ahw->max_vnic_func;
+
+ if (qlcnic_82xx_check(adapter))
+ i = qlcnic_82xx_get_registers(adapter, regs_buff);
+ else
+ i = qlcnic_83xx_get_registers(adapter, regs_buff);
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ /* Marker between registers and TX ring count */
+ regs_buff[i++] = 0xFFEFCDAB;
+
+ regs_buff[i++] = adapter->drv_tx_rings; /* No. of TX ring */
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ regs_buff[i++] = le32_to_cpu(*(tx_ring->hw_consumer));
+ regs_buff[i++] = tx_ring->sw_consumer;
+ regs_buff[i++] = readl(tx_ring->crb_cmd_producer);
+ regs_buff[i++] = tx_ring->producer;
+ if (tx_ring->crb_intr_mask)
+ regs_buff[i++] = readl(tx_ring->crb_intr_mask);
+ else
+ regs_buff[i++] = QLCNIC_TX_INTR_NOT_CONFIGURED;
+ }
+
+ regs_buff[i++] = adapter->max_rds_rings; /* No. of RX ring */
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_rings = &recv_ctx->rds_rings[ring];
+ regs_buff[i++] = readl(rds_rings->crb_rcv_producer);
+ regs_buff[i++] = rds_rings->producer;
+ }
+
+ regs_buff[i++] = adapter->drv_sds_rings; /* No. of SDS ring */
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ regs_buff[i++] = sds_ring->consumer;
+ regs_buff[i++] = readl(sds_ring->crb_intr_mask);
+ }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err = 0;
+ u32 val;
+
+ if (qlcnic_83xx_check(adapter)) {
+ val = qlcnic_83xx_test_link(adapter);
+ return (val & 1) ? 0 : 1;
+ }
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+ if (err == -EIO)
+ return err;
+ val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
+ return (val == XG_LINK_UP_P3P) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret = -1;
+
+ if (qlcnic_83xx_check(adapter))
+ return 0;
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ if (qlcnic_82xx_check(adapter))
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ ring->rx_max_pending = adapter->max_rxd;
+ ring->rx_jumbo_max_pending = adapter->max_jumbo_rxd;
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+}
+
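+/*
+ * Clamp the requested descriptor count to [min, max] and round it up to
+ * a power of two, logging whenever the value had to be adjusted.
+ */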
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, adapter->max_rxd, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, adapter->max_jumbo_rxd,
+ "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
+ u8 rx_ring, u8 tx_ring)
+{
+ if (rx_ring == 0 || tx_ring == 0)
+ return -EINVAL;
+
+ if (rx_ring != 0) {
+ if (rx_ring > adapter->max_sds_rings) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ rx_ring, adapter->max_sds_rings);
+ return -EINVAL;
+ }
+ }
+
+ if (tx_ring != 0) {
+ if (tx_ring > adapter->max_tx_rings) {
+ netdev_err(adapter->netdev,
+ "Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
+ tx_ring, adapter->max_tx_rings);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void qlcnic_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ channel->max_rx = adapter->max_sds_rings;
+ channel->max_tx = adapter->max_tx_rings;
+ channel->rx_count = adapter->drv_sds_rings;
+ channel->tx_count = adapter->drv_tx_rings;
+}
+
+static int qlcnic_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+ return -EINVAL;
+ }
+
+ if (channel->other_count || channel->combined_count)
+ return -EINVAL;
+
+ err = qlcnic_validate_ring_count(adapter, channel->rx_count,
+ channel->tx_count);
+ if (err)
+ return err;
+
+ if (adapter->drv_sds_rings != channel->rx_count) {
+ err = qlcnic_validate_rings(adapter, channel->rx_count,
+ QLCNIC_RX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u SDS rings\n",
+ channel->rx_count);
+ return err;
+ }
+ adapter->drv_rss_rings = channel->rx_count;
+ }
+
+ if (adapter->drv_tx_rings != channel->tx_count) {
+ err = qlcnic_validate_rings(adapter, channel->tx_count,
+ QLCNIC_TX_QUEUE);
+ if (err) {
+ netdev_err(dev, "Unable to configure %u Tx rings\n",
+ channel->tx_count);
+ return err;
+ }
+ adapter->drv_tss_rings = channel->tx_count;
+ }
+
+ adapter->flags |= QLCNIC_TSS_RSS;
+
+ err = qlcnic_setup_rings(adapter);
+ netdev_info(dev, "Allocated %d SDS rings and %d Tx rings\n",
+ adapter->drv_sds_rings, adapter->drv_tx_rings);
+
+ return err;
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->ahw->physical_port;
+ int err = 0;
+ __u32 val;
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_get_pauseparam(adapter, pause);
+ return;
+ }
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return;
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw->port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->ahw->physical_port;
+ int err = 0;
+ __u32 val;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_pauseparam(adapter, pause);
+
+ /* read mode */
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return err;
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
+ /* set Tx pause mask */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if (!pause->rx_pause || pause->autoneg)
+ return -EOPNOTSUPP;
+
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw->port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_reg_test(adapter);
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+ if (err == -EIO)
+ return err;
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_eeprom_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (qlcnic_82xx_check(adapter))
+ return 0;
+
+ return qlcnic_83xx_flash_test(adapter);
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return qlcnic_dev_statistics_len(adapter);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
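+/*
+ * Interrupt self-test for 82xx: enter diagnostic mode, issue the
+ * interrupt-test mailbox command and check that diag_cnt was incremented
+ * as a result.
+ */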
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ int ret, drv_sds_rings = adapter->drv_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_interrupt_test(netdev);
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_diag_irq;
+
+ ahw->diag_cnt = 0;
+ ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST);
+ if (ret)
+ goto free_diag_res;
+
+ cmd.req.arg[1] = ahw->pci_func;
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret)
+ goto done;
+
+ usleep_range(1000, 12000);
+ ret = !ahw->diag_cnt;
+
+done:
+ qlcnic_free_mbx_args(&cmd);
+
+free_diag_res:
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
+
+clear_diag_irq:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ return ret;
+}
+
+#define QLCNIC_ILB_PKT_SIZE 64
+#define QLCNIC_NUM_ILB_PKT 16
+#define QLCNIC_ILB_MAX_RCV_LOOP 10
+#define QLCNIC_LB_PKT_POLL_DELAY_MSEC 1
+#define QLCNIC_LB_PKT_POLL_COUNT 20
+
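+/*
+ * Build a QLCNIC_ILB_PKT_SIZE loopback frame: destination and source MAC
+ * set to the adapter address, a fixed four-byte pattern after the
+ * addresses and 0x4e filling the remainder.
+ */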
+static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[])
+{
+ static const unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+
+ memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE);
+
+ memcpy(data, mac, ETH_ALEN);
+ memcpy(data + ETH_ALEN, mac, ETH_ALEN);
+
+ memcpy(data + 2 * ETH_ALEN, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data, u8 mac[])
+{
+ unsigned char buff[QLCNIC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff, mac);
+ return memcmp(data, buff, QLCNIC_ILB_PKT_SIZE);
+}
+
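+/*
+ * Transmit QLCNIC_NUM_ILB_PKT loopback frames and poll the first SDS ring
+ * for each of them; the test fails if any frame is not received back.
+ */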
+int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i, loop, cnt = 0;
+
+ for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
+ skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+ if (!skb)
+ goto error;
+ qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
+ skb_put(skb, QLCNIC_ILB_PKT_SIZE);
+ adapter->ahw->diag_cnt = 0;
+ qlcnic_xmit_frame(skb, adapter->netdev);
+ loop = 0;
+
+ do {
+ msleep(QLCNIC_LB_PKT_POLL_DELAY_MSEC);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_LB_PKT_POLL_COUNT)
+ break;
+ } while (!adapter->ahw->diag_cnt);
+
+ dev_kfree_skb_any(skb);
+
+ if (!adapter->ahw->diag_cnt)
+ dev_warn(&adapter->pdev->dev,
+ "LB Test: packet #%d was not received\n",
+ i + 1);
+ else
+ cnt++;
+ }
+ if (cnt != i) {
+error:
+ dev_err(&adapter->pdev->dev,
+ "LB Test: failed, TX[%d], RX[%d]\n", i, cnt);
+ if (mode != QLCNIC_ILB_MODE)
+ dev_warn(&adapter->pdev->dev,
+ "WARNING: Please check loopback cable\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int drv_sds_rings = adapter->drv_sds_rings;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int loop = 0;
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_loopback_test(netdev, mode);
+
+ if (!(ahw->capabilities & QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK)) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware do not support loopback test\n");
+ return -EOPNOTSUPP;
+ }
+
+ dev_warn(&adapter->pdev->dev, "%s loopback test in progress\n",
+ mode == QLCNIC_ILB_MODE ? "internal" : "external");
+ if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(&adapter->pdev->dev,
+ "Loopback test not supported in nonprivileged mode\n");
+ return 0;
+ }
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ sds_ring = &adapter->recv_ctx->sds_rings[0];
+ ret = qlcnic_set_lb_mode(adapter, mode);
+ if (ret)
+ goto free_res;
+
+ ahw->diag_cnt = 0;
+ do {
+ msleep(500);
+ qlcnic_process_rcv_ring_diag(sds_ring);
+ if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
+ netdev_info(netdev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -ETIMEDOUT;
+ goto free_res;
+ } else if (adapter->ahw->diag_cnt) {
+ ret = adapter->ahw->diag_cnt;
+ goto free_res;
+ }
+ } while (!QLCNIC_IS_LB_CONFIGURED(ahw->loopback_state));
+
+ ret = qlcnic_do_lb_test(adapter, mode);
+
+ qlcnic_clear_lb_mode(adapter, mode);
+
+ free_res:
+ qlcnic_diag_free_res(netdev, drv_sds_rings);
+
+ clear_it:
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev, QLCNIC_ILB_MODE);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
+ data[5] = qlcnic_eeprom_test(dev);
+ if (data[5])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index, i, num_stats;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(qlcnic_tx_queue_stats_strings);
+ for (i = 0; i < adapter->drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_queue_%d %s", i,
+ qlcnic_tx_queue_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_tx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ num_stats = ARRAY_SIZE(qlcnic_83xx_rx_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_rx_stats_strings[i],
+ ETH_GSTRING_LEN);
+ return;
+ } else {
+ num_stats = ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
+ for (i = 0; i < num_stats; i++, index++)
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_83xx_mac_stats_strings[i],
+ ETH_GSTRING_LEN);
+ }
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ num_stats = ARRAY_SIZE(qlcnic_device_gstrings_stats);
+ for (i = 0; i < num_stats; index++, i++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_device_gstrings_stats[i],
+ ETH_GSTRING_LEN);
+ }
+ }
+}
+
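+/*
+ * Copy either MAC or eSwitch firmware statistics into the ethtool data
+ * array, one u64 entry per counter.
+ */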
+static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
+{
+ if (type == QLCNIC_MAC_STATS) {
+ struct qlcnic_mac_statistics *mac_stats =
+ (struct qlcnic_mac_statistics *)stats;
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
+ *data++ = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
+ } else if (type == QLCNIC_ESW_STATS) {
+ struct __qlcnic_esw_statistics *esw_stats =
+ (struct __qlcnic_esw_statistics *)stats;
+ *data++ = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->errors);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
+ *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
+ }
+ return data;
+}
+
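+/* Aggregate the per-ring Tx counters into the adapter-wide statistics. */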
+void qlcnic_update_stats(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_tx_queue_stats tx_stats;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ memset(&tx_stats, 0, sizeof(tx_stats));
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
+ tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
+ tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
+ tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
+ tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
+ }
+
+ adapter->stats.xmit_on = tx_stats.xmit_on;
+ adapter->stats.xmit_off = tx_stats.xmit_off;
+ adapter->stats.xmitcalled = tx_stats.xmit_called;
+ adapter->stats.xmitfinished = tx_stats.xmit_finished;
+ adapter->stats.txbytes = tx_stats.tx_bytes;
+}
+
+static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ tx_ring = (struct qlcnic_host_tx_ring *)stats;
+
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_on);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_off);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_called);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.xmit_finished);
+ *data++ = QLCNIC_FILL_STATS(tx_ring->tx_stats.tx_bytes);
+
+ return data;
+}
+
+static void qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_esw_statistics port_stats;
+ struct qlcnic_mac_statistics mac_stats;
+ int index, ret, length, size, ring;
+ char *p;
+
+ memset(data, 0, stats->n_stats * sizeof(u64));
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+ tx_ring = &adapter->tx_ring[ring];
+ data = qlcnic_fill_tx_queue_stats(data, tx_ring);
+ qlcnic_update_stats(adapter);
+ } else {
+ data += QLCNIC_TX_STATS_LEN;
+ }
+ }
+
+ length = QLCNIC_STATS_LEN;
+ for (index = 0; index < length; index++) {
+ p = (char *)adapter + qlcnic_gstrings_stats[index].stat_offset;
+ size = qlcnic_gstrings_stats[index].sizeof_stat;
+ *data++ = (size == sizeof(u64)) ? (*(u64 *)p) : ((*(u32 *)p));
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->linkup)
+ qlcnic_83xx_get_stats(adapter, data);
+ return;
+ } else {
+ /* Retrieve MAC statistics from firmware */
+ memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
+ qlcnic_get_mac_stats(adapter, &mac_stats);
+ data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
+ }
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+
+ memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+ QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
+ if (ret)
+ return;
+
+ data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
+ QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
+ if (ret)
+ return;
+
+ qlcnic_fill_stats(data, &port_stats.tx, QLCNIC_ESW_STATS);
+}
+
+static int qlcnic_set_led(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int drv_sds_rings = adapter->drv_sds_rings;
+ int err = -EIO, active = 1;
+
+ if (qlcnic_83xx_check(adapter))
+ return qlcnic_83xx_set_led(dev, state);
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ netdev_warn(dev, "LED test not supported for non "
+ "privilege function\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
+ return -EBUSY;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+ break;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0) {
+ err = 0;
+ break;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ active = 0;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ break;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST))
+ break;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ if (adapter->nic_ops->config_led(adapter, 0, 0xf))
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+ qlcnic_diag_free_res(dev, drv_sds_rings);
+
+ if (!active || err)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ return err;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return;
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+ int err = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EINVAL;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return err;
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return -EINVAL;
+
+ /*
+ * Return an error if unsupported values or
+ * parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff)
+ return -EINVAL;
+
+ err = qlcnic_config_intr_coalesce(adapter, ethcoal);
+
+ return err;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
+ ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
+ ethcoal->tx_coalesce_usecs = adapter->ahw->coal.tx_time_us;
+ ethcoal->tx_max_coalesced_frames = adapter->ahw->coal.tx_packets;
+
+ return 0;
+}
+
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->ahw->msg_enable;
+}
+
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ adapter->ahw->msg_enable = msglvl;
+}
+
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = true;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+ return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = false;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+ return 0;
+}
+
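+/*
+ * On 84xx adapters the FW dump enable state is kept in the IDC control
+ * register; on other adapters it is tracked in the driver's fw_dump state.
+ */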
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool state;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
+ } else {
+ state = fw_dump->enable;
+ }
+
+ return state;
+}
+
+static int
+qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(adapter->netdev, "FW Dump not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (fw_dump->clr)
+ dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
+ else
+ dump->len = 0;
+
+ if (!qlcnic_check_fw_dump_state(adapter))
+ dump->flag = ETH_FW_DUMP_DISABLE;
+ else
+ dump->flag = fw_dump->cap_mask;
+
+ dump->version = adapter->fw_version;
+ return 0;
+}
+
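+/*
+ * Return the captured dump: the template header converted to little
+ * endian followed by the raw dump data. The dump buffer is freed once it
+ * has been copied out.
+ */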
+static int
+qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
+{
+ int i, copy_sz;
+ u32 *hdr_ptr;
+ __le32 *data;
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW Dump not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (!fw_dump->clr) {
+ netdev_info(netdev, "Dump not available\n");
+ return -EINVAL;
+ }
+
+ /* Copy template header first */
+ copy_sz = fw_dump->tmpl_hdr_size;
+ hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
+ data = buffer;
+ for (i = 0; i < copy_sz/sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
+ dump->len = copy_sz + fw_dump->size;
+ dump->flag = fw_dump->cap_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(fw_dump->data);
+ fw_dump->data = NULL;
+ fw_dump->clr = 0;
+ netdev_info(netdev, "extracted the FW dump Successfully\n");
+ return 0;
+}
+
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct net_device *netdev = adapter->netdev;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev,
+ "Can not change driver mask to 0x%x. FW dump not enabled\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ fw_dump->cap_mask = mask;
+
+ /* Store the new capture mask in the template header as well */
+ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+ return 0;
+}
+
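+/*
+ * ethtool set_dump handler: special flag values force a dump or reset,
+ * enable or disable dumping, or request quiescent mode; any other value
+ * is validated against qlcnic_fw_dump_level and used as the capture mask.
+ */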
+static int
+qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool valid_mask = false;
+ int i, ret = 0;
+
+ switch (val->flag) {
+ case QLCNIC_FORCE_FW_DUMP_KEY:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev, "FW dump not enabled\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (fw_dump->clr) {
+ netdev_info(netdev,
+ "Previous dump not cleared, not forcing dump\n");
+ break;
+ }
+
+ netdev_info(netdev, "Forcing a FW dump\n");
+ qlcnic_dev_request_reset(adapter, val->flag);
+ break;
+ case QLCNIC_DISABLE_FW_DUMP:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = qlcnic_disable_fw_dump_state(adapter);
+ break;
+
+ case QLCNIC_ENABLE_FW_DUMP:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ ret = qlcnic_enable_fw_dump_state(adapter);
+ break;
+
+ case QLCNIC_FORCE_FW_RESET:
+ netdev_info(netdev, "Forcing a FW reset\n");
+ qlcnic_dev_request_reset(adapter, val->flag);
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ break;
+
+ case QLCNIC_SET_QUIESCENT:
+ case QLCNIC_RESET_QUIESCENT:
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ netdev_info(netdev, "Device is in non-operational state\n");
+ break;
+
+ default:
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
+ if (val->flag == qlcnic_fw_dump_level[i]) {
+ valid_mask = true;
+ break;
+ }
+ }
+
+ if (valid_mask) {
+ ret = qlcnic_set_dump_mask(adapter, val->flag);
+ } else {
+ netdev_info(netdev, "Invalid dump level: 0x%x\n",
+ val->flag);
+ ret = -EINVAL;
+ }
+ }
+ return ret;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .set_channels = qlcnic_set_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_phys_id = qlcnic_set_led,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .get_dump_flag = qlcnic_get_dump_flag,
+ .get_dump_data = qlcnic_get_dump_data,
+ .set_dump = qlcnic_set_dump,
+ .get_link_ksettings = qlcnic_get_link_ksettings,
+ .set_link_ksettings = qlcnic_set_link_ksettings,
+};
+
+const struct ethtool_ops qlcnic_sriov_vf_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .get_wol = qlcnic_get_wol,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .get_link_ksettings = qlcnic_get_link_ksettings,
+};
+
+const struct ethtool_ops qlcnic_ethtool_failed_ops = {
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .set_msglevel = qlcnic_set_msglevel,
+ .get_msglevel = qlcnic_get_msglevel,
+ .set_dump = qlcnic_set_dump,
+ .get_link_ksettings = qlcnic_get_link_ksettings,
+};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
new file mode 100644
index 000000000..83a586d6f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,947 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "qlcnic_hw.h"
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+#define BIT_0 0x1
+#define BIT_1 0x2
+#define BIT_2 0x4
+#define BIT_3 0x8
+#define BIT_4 0x10
+#define BIT_5 0x20
+#define BIT_6 0x40
+#define BIT_7 0x80
+#define BIT_8 0x100
+#define BIT_9 0x200
+#define BIT_10 0x400
+#define BIT_11 0x800
+#define BIT_12 0x1000
+#define BIT_13 0x2000
+#define BIT_14 0x4000
+#define BIT_15 0x8000
+#define BIT_16 0x10000
+#define BIT_17 0x20000
+#define BIT_18 0x40000
+#define BIT_19 0x80000
+#define BIT_20 0x100000
+#define BIT_21 0x200000
+#define BIT_22 0x400000
+#define BIT_23 0x800000
+#define BIT_24 0x1000000
+#define BIT_25 0x2000000
+#define BIT_26 0x4000000
+#define BIT_27 0x8000000
+#define BIT_28 0x10000000
+#define BIT_29 0x20000000
+#define BIT_30 0x40000000
+#define BIT_31 0x80000000
+
+/*
+ * This field defines CRB adr [31:20] of the agents; each value below is
+ * built as (hub channel address << 7) | agent address.
+ */
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_EPG QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_EG)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_TIMER QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_TIMR)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_CAMQM (0x04800000UL)
+#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+#define MAX_CTL_CHECK 1000
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START BIT_0
+#define TA_CTL_ENABLE BIT_1
+#define TA_CTL_WRITE BIT_2
+#define TA_CTL_BUSY BIT_3
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3P 0x01
+#define XG_LINK_DOWN_P3P 0x02
+#define XG_LINK_STATE_P3P_MASK 0xf
+#define XG_LINK_STATE_P3P(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3P_MASK)
+
+#define P3P_LINK_SPEED_MHZ 100
+#define P3P_LINK_SPEED_MASK 0xff
+#define P3P_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3P_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3P_LINK_SPEED_MASK)
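+
+/*
+ * Illustrative sketch, not part of the original driver (the helper names are
+ * hypothetical): each PCI function owns a 4-bit nibble of the
+ * CRB_XG_STATE_P3P word and an 8-bit field of its P3P_LINK_SPEED_REG()
+ * word, both read via QLCRD32() by the caller.
+ */
+static inline int qlcnic_p3p_link_is_up(u32 state, int pcifn)
+{
+ return XG_LINK_STATE_P3P(pcifn, state) == XG_LINK_UP_P3P;
+}
+
+static inline u32 qlcnic_p3p_link_speed_mbps(u32 speed_reg, int pcifn)
+{
+ /* assumes the speed field is in units of P3P_LINK_SPEED_MHZ (100 Mbps) */
+ return P3P_LINK_SPEED_VAL(pcifn, speed_reg) * P3P_LINK_SPEED_MHZ;
+}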
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_MAX_ARGS 4
+#define QLCNIC_CDRP_ARG(i) (QLCNIC_REG(0x18 + ((i) * 4)))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_XG_STATE_P3P (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_FW_CAPABILITIES_2 (QLCNIC_CAM_RAM(0x12c))
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
+#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
+
+/* Device State */
+#define QLCNIC_DEV_COLD 0x1
+#define QLCNIC_DEV_INITIALIZING 0x2
+#define QLCNIC_DEV_READY 0x3
+#define QLCNIC_DEV_NEED_RESET 0x4
+#define QLCNIC_DEV_NEED_QUISCENT 0x5
+#define QLCNIC_DEV_FAILED 0x6
+#define QLCNIC_DEV_QUISCENT 0x7
+
+#define QLCNIC_DEV_BADBAD 0xbad0bad0
+
+#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
+#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
+#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
+
+#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
+#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
+#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
+#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
+
+#define QLC_DEV_GET_DRV(VAL, FN) (0xf & ((VAL) >> (FN * 4)))
+#define QLC_DEV_SET_DRV(VAL, FN) ((VAL) << (FN * 4))
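+
+/*
+ * Illustrative usage, not part of the original driver: the state words above
+ * carve out one 4-bit field per PCI function, so a function would mark
+ * itself ready for reset and inspect a peer roughly as
+ *
+ *   QLC_DEV_SET_RST_RDY(state, my_func);
+ *   peer = QLC_DEV_GET_DRV(drv_state, peer_func);
+ */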
+
+#define QLCNIC_TYPE_NIC 1
+#define QLCNIC_TYPE_FCOE 2
+#define QLCNIC_TYPE_ISCSI 3
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD BIT_30
+#define QLCNIC_RCODE_FATAL_ERROR BIT_31
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0x1fffff)
+#define QLCNIC_FWERROR_FAN_FAILURE 0x16
+
+#define FW_POLL_DELAY (1 * HZ)
+#define FW_FAIL_THRESH 2
+
+#define QLCNIC_RESET_TIMEOUT_SECS 10
+#define QLCNIC_INIT_TIMEOUT_SECS 30
+#define QLCNIC_RCVPEG_CHECK_RETRY_COUNT 2000
+#define QLCNIC_RCVPEG_CHECK_DELAY 10
+#define QLCNIC_CMDPEG_CHECK_RETRY_COUNT 60
+#define QLCNIC_CMDPEG_CHECK_DELAY 500
+#define QLCNIC_HEARTBEAT_PERIOD_MSECS 200
+#define QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT 10
+
+#define QLCNIC_MAX_MC_COUNT 38
+#define QLCNIC_MAX_UC_COUNT 512
+#define QLCNIC_WATCHDOG_TIMEOUTVALUE 5
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_MSIX_BASE 0x132110
+#define QLCNIC_MAX_VLAN_FILTERS 64
+
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+#define QLCNIC_FW_DUMP_REG1 0x00130060
+#define QLCNIC_FW_DUMP_REG2 0x001e0000
+#define QLCNIC_FLASH_SEM2_LK 0x0013C010
+#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
+#define QLCNIC_FLASH_LOCK_ID 0x001B2100
+
+/* PCI function operational mode */
+enum {
+ QLCNIC_MGMT_FUNC = 0,
+ QLCNIC_PRIV_FUNC = 1,
+ QLCNIC_NON_PRIV_FUNC = 2,
+ QLCNIC_SRIOV_PF_FUNC = 3,
+ QLCNIC_SRIOV_VF_FUNC = 4,
+ QLCNIC_UNKNOWN_FUNC_MODE = 5
+};
+
+enum {
+ QLCNIC_PORT_DEFAULTS = 0,
+ QLCNIC_ADD_VLAN = 1,
+ QLCNIC_DEL_VLAN = 2
+};
+
+#define QLC_DEV_DRV_DEFAULT 0x11111111
+
+#define LSB(x) ((uint8_t)(x))
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+
+#define LSW(x) ((uint16_t)((uint32_t)(x)))
+#define MSW(x) ((uint16_t)((uint32_t)(x) >> 16))
+
+#define LSD(x) ((uint32_t)((uint64_t)(x)))
+#define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
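+
+/*
+ * Illustrative examples, not part of the original driver:
+ *   LSB(0x1234)                == 0x34
+ *   MSW(0x12345678)            == 0x1234
+ *   MSD(0x1122334455667788ULL) == 0x11223344
+ */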
+
+#define QLCNIC_MS_CTRL 0x41000090
+#define QLCNIC_MS_ADDR_LO 0x41000094
+#define QLCNIC_MS_ADDR_HI 0x41000098
+#define QLCNIC_MS_WRTDATA_LO 0x410000A0
+#define QLCNIC_MS_WRTDATA_HI 0x410000A4
+#define QLCNIC_MS_WRTDATA_ULO 0x410000B0
+#define QLCNIC_MS_WRTDATA_UHI 0x410000B4
+#define QLCNIC_MS_RDDATA_LO 0x410000A8
+#define QLCNIC_MS_RDDATA_HI 0x410000AC
+#define QLCNIC_MS_RDDATA_ULO 0x410000B8
+#define QLCNIC_MS_RDDATA_UHI 0x410000BC
+
+#define QLCNIC_TA_WRITE_ENABLE (TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_WRITE_START (TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE)
+#define QLCNIC_TA_START_ENABLE (TA_CTL_START | TA_CTL_ENABLE)
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, }, \
+}
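+
+/*
+ * Illustrative usage, not part of this header: the initializer above is
+ * intended to populate a per-PCI-function table, e.g.
+ *
+ *   static const struct qlcnic_legacy_intr_set legacy_intr[] =
+ *           QLCNIC_LEGACY_INTR_CONFIG;
+ *
+ * so legacy interrupt handling can index it by PCI function number to find
+ * the matching vector bit and target status/mask registers.
+ */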
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit) (((var) >> (bit)) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
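+
+/*
+ * Illustrative usage, not part of the original driver: the helpers above
+ * edit a local copy of the MAC config word, which the caller then writes
+ * back, e.g.
+ *
+ *   val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ *   qlcnic_gb_rx_flowctl(val);
+ *   QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
+ */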
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
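+
+/*
+ * Illustrative usage, not part of the original driver: once the PHY status
+ * word (register 17) has been read over MII, it decodes as
+ *
+ *   link   = qlcnic_get_phy_link(status);
+ *   duplex = qlcnic_get_phy_duplex(status);
+ *   speed  = qlcnic_get_phy_speed(status);  (0:10M, 1:100M, 2:1000M)
+ */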
+
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
new file mode 100644
index 000000000..4b8bc46f5
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1685 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/slab.h>
+#include <net/ip.h>
+#include <linux/bitops.h>
+
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
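+
+/*
+ * Illustrative example, not part of the original driver: a 128M-map CRB
+ * offset decomposes as block = bits 25:20 and sub-block = bits 19:16.
+ * For the NIU block at offset 0x0600000, CRB_BLK() yields 6 and
+ * crb_hub_agt[6] is QLCNIC_HW_CRB_HUB_AGT_ADR_NIU, so CRB_HI() builds the
+ * window value that qlcnic_pci_set_crbwindow_2M() later programs into
+ * CRB_WINDOW_2M for indirect access.
+ */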
+
+struct qlcnic_ms_reg_ctrl {
+ u32 ocm_window;
+ u32 control;
+ u32 hi;
+ u32 low;
+ u32 rd[4];
+ u32 wd[4];
+ u64 off;
+};
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+static struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+static void qlcnic_read_window_reg(u32 addr, void __iomem *bar0, u32 *data)
+{
+ u32 dest;
+ void __iomem *val;
+
+ dest = addr & 0xFFFF0000;
+ val = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, val);
+ readl(val);
+ val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ *data = readl(val);
+}
+
+static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
+{
+ u32 dest;
+ void __iomem *val;
+
+ dest = addr & 0xFFFF0000;
+ val = bar0 + QLCNIC_FW_DUMP_REG1;
+ writel(dest, val);
+ readl(val);
+ val = bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr);
+ writel(data, val);
+ readl(val);
+}
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int timeout = 0, err = 0, done = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+ &err);
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
+ if (id_reg) {
+ done = QLCRD32(adapter, id_reg, &err);
+ if (done != -1)
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock held by=%d\n",
+ sem, done);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock",
+ sem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock", sem);
+ }
+ return -EIO;
+ }
+ udelay(1200);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ int err = 0;
+
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
+}
+
+int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
+{
+ int err = 0;
+ u32 data;
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
+ else {
+ data = QLCRD32(adapter, addr, &err);
+ if (err == -EIO)
+ return err;
+ }
+ return data;
+}
+
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
+ else
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
+}
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return -EIO;
+
+ tx_ring = &adapter->tx_ring[0];
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ smp_mb();
+ if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+ netif_tx_wake_queue(tx_ring->txq);
+ } else {
+ adapter->stats.xmit_off++;
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ cmd_desc, sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ u16 vlan_id, u8 op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req.words[1];
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+int qlcnic_nic_del_mac(struct qlcnic_adapter *adapter, const u8 *addr)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ int err = -EINVAL;
+
+ /* Delete MAC from the existing list */
+ list_for_each_entry(cur, &adapter->mac_list, list) {
+ if (ether_addr_equal(addr, cur->mac_addr)) {
+ err = qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ list_del(&cur->list);
+ kfree(cur);
+ return err;
+ }
+ }
+ return err;
+}
+
+int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan,
+ enum qlcnic_mac_type mac_type)
+{
+ struct qlcnic_mac_vlan_list *cur;
+
+ /* look up if already exists */
+ list_for_each_entry(cur, &adapter->mac_list, list) {
+ if (ether_addr_equal(addr, cur->mac_addr) &&
+ cur->vlan_id == vlan)
+ return 0;
+ }
+
+ cur = kzalloc(sizeof(*cur), GFP_ATOMIC);
+ if (cur == NULL)
+ return -ENOMEM;
+
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+
+ if (qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, vlan, QLCNIC_MAC_ADD)) {
+ kfree(cur);
+ return -EIO;
+ }
+
+ cur->vlan_id = vlan;
+ cur->mac_type = mac_type;
+
+ list_add_tail(&cur->list, &adapter->mac_list);
+ return 0;
+}
+
+void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_vlan_list *cur;
+ struct list_head *head, *tmp;
+
+ list_for_each_safe(head, tmp, &adapter->mac_list) {
+ cur = list_entry(head, struct qlcnic_mac_vlan_list, list);
+ if (cur->mac_type != QLCNIC_MULTICAST_MAC)
+ continue;
+
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ cur->vlan_id, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct netdev_hw_addr *ha;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan,
+ QLCNIC_UNICAST_MAC);
+ qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC);
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else if (!netdev_mc_empty(netdev)) {
+ qlcnic_flush_mcast_mac(adapter);
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+ QLCNIC_MULTICAST_MAC);
+ }
+
+ /* Configure unicast MAC addresses; if there is not enough space to
+ * store all of them, enable promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan,
+ QLCNIC_UNICAST_MAC);
+ }
+
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
+}
+
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *n;
+ struct hlist_head *head;
+ int i;
+ unsigned long expires;
+ u8 cmd;
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
+ head = &(adapter->fhash.fhead[i]);
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++) {
+ head = &(adapter->rx_fhash.fhead[i]);
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
+ spin_lock_bh(&adapter->rx_mac_learn_lock);
+ adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->rx_mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+ }
+}
+
+void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_filter *tmp_fil;
+ struct hlist_node *n;
+ struct hlist_head *head;
+ int i;
+ u8 cmd;
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++) {
+ head = &(adapter->fhash.fhead[i]);
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
+ QLCNIC_MAC_DEL;
+ qlcnic_sre_macaddr_change(adapter,
+ tmp_fil->faddr,
+ tmp_fil->vlan_id,
+ cmd);
+ spin_lock_bh(&adapter->mac_learn_lock);
+ adapter->fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ spin_unlock_bh(&adapter->mac_learn_lock);
+ kfree(tmp_fil);
+ }
+ }
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+ req.req_hdr = cpu_to_le64(QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64) adapter->portnum << 16) | ((u64) 0x1 << 32));
+
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->pdev->dev, "%sting loopback mode failed\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ if (qlcnic_set_fw_loopback(adapter, mode))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, 0);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ mode = VPORT_MISS_MODE_DROP;
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+ msleep(1000);
+ return 0;
+}
+
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
+ ((u64) adapter->portnum << 16));
+
+ req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
+ req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
+ ((u64) adapter->ahw->coal.rx_time_us) << 16);
+ req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
+ ((u64) adapter->ahw->coal.type) << 32 |
+ ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+/* Send the interrupt coalescing parameter set by ethtool to the card. */
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *adapter,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ int rv;
+
+ coal->flag = QLCNIC_INTR_DEFAULT;
+ coal->rx_time_us = ethcoal->rx_coalesce_usecs;
+ coal->rx_packets = ethcoal->rx_max_coalesced_frames;
+
+ rv = qlcnic_82xx_set_rx_coalesce(adapter);
+
+ if (rv)
+ netdev_err(adapter->netdev,
+ "Failed to set Rx coalescing parameters\n");
+
+ return rv;
+}
+
+#define QLCNIC_ENABLE_IPV4_LRO BIT_0
+#define QLCNIC_ENABLE_IPV6_LRO (BIT_1 | BIT_9)
+
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ word = 0;
+ if (enable) {
+ word = QLCNIC_ENABLE_IPV4_LRO;
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAP2_HW_LRO_IPV6)
+ word |= QLCNIC_ENABLE_IPV6_LRO;
+ }
+
+ req.words[0] = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define QLCNIC_RSS_HASHTYPE_IP_TCP 0x3
+#define QLCNIC_ENABLE_TYPE_C_RSS BIT_10
+#define QLCNIC_RSS_FEATURE_FLAG (1ULL << 63)
+#define QLCNIC_RSS_IND_TABLE_MASK 0x7ULL
+
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ static const u64 key[] = {
+ 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL
+ };
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 10: type-c rss
+ * 11: udp rss
+ * 47-12: reserved
+ * 62-48: indirection table mask
+ * 63: feature flag
+ */
+ word = ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(QLCNIC_RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((u64)QLCNIC_RSS_IND_TABLE_MASK << 48) |
+ (u64)QLCNIC_ENABLE_TYPE_C_RSS |
+ (u64)QLCNIC_RSS_FEATURE_FLAG;
+
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
+
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_ipaddr *ipa;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ ipa = (struct qlcnic_ipaddr *)&req.words[1];
+ ipa->ipv4 = ip;
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x request\n",
+ (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+}
+
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+static int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56) ;
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit (MTU)
+ * @returns 0 on success, negative on failure
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
+static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
+ netdev_features_t features)
+{
+ u32 offload_flags = adapter->offload_flags;
+
+ if (offload_flags & BIT_0) {
+ features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM;
+ adapter->rx_csum = 1;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ if (!(offload_flags & BIT_1))
+ features &= ~NETIF_F_TSO;
+ else
+ features |= NETIF_F_TSO;
+
+ if (!(offload_flags & BIT_2))
+ features &= ~NETIF_F_TSO6;
+ else
+ features |= NETIF_F_TSO6;
+ }
+ } else {
+ features &= ~(NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ adapter->rx_csum = 0;
+ }
+
+ return features;
+}
+
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed;
+
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
+ features = qlcnic_process_flags(adapter, features);
+ } else {
+ changed = features ^ netdev->features;
+ features ^= changed & (NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+ }
+ }
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (!(changed & NETIF_F_LRO))
+ return 0;
+
+ netdev->features ^= NETIF_F_LRO;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if (!hw_lro && qlcnic_82xx_check(adapter)) {
+ if (qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/*
+ * qlcnic_pci_get_crb_addr_2M - translate a 128M-map CRB offset for the 2M map
+ *
+ * In: 'off' is the offset from the base of the 128M pci map.
+ * Returns < 0 if 'off' is not a valid CRB offset,
+ *           0 if it maps directly; '*addr' is the 2M BAR0 address,
+ *           1 if CRB window access is needed; '*addr' points into the
+ *             indirect region and the caller must program the CRB window.
+ */
+static int qlcnic_pci_get_crb_addr_2M(struct qlcnic_hardware_context *ahw,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = ahw->pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
+
+/*
+ * In: 'off' is offset from CRB space in 128M pci map
+ * Out: 'off' is 2M pci map addr
+ * side effect: lock crb window
+ */
+static int
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+ if (window == 0) {
+ dev_err(&adapter->pdev->dev, "Invalid offset 0x%lx\n", off);
+ return -EIO;
+ }
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ return -EIO;
+ }
+ return 0;
+}
+
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+ crb_win_lock(adapter);
+ rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
+ if (!rv)
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+ return rv;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ int *err)
+{
+ unsigned long flags;
+ int rv;
+ u32 data = -1;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter->ahw, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
+ crb_win_lock(adapter);
+ if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_hardware_context *ahw,
+ u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(ahw, offset, &addr));
+
+ return addr;
+}
+
+static int qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter,
+ u32 window, u64 off, u64 *data, int op)
+{
+ void __iomem *addr;
+ u32 start;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ writel(window, adapter->ahw->ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw->ocm_win_crb);
+ start = QLCNIC_PCI_OCM0_2M + off;
+
+ addr = adapter->ahw->pci_base0 + start;
+
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+ /* Set window to 0 */
+ writel(0, adapter->ahw->ocm_win_crb);
+ readl(adapter->ahw->ocm_win_crb);
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return 0;
+}
+
+static void
+qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ void __iomem *addr = adapter->ahw->pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ *data = readq(addr);
+ mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+static void
+qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ void __iomem *addr = adapter->ahw->pci_base0 +
+ QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+ writeq(data, addr);
+ mutex_unlock(&adapter->ahw->mem_lock);
+}
+
+/* Set MS memory control data for different adapters */
+static void qlcnic_set_ms_controls(struct qlcnic_adapter *adapter, u64 off,
+ struct qlcnic_ms_reg_ctrl *ms)
+{
+ ms->control = QLCNIC_MS_CTRL;
+ ms->low = QLCNIC_MS_ADDR_LO;
+ ms->hi = QLCNIC_MS_ADDR_HI;
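+ /*
+ * The memory agent works on 16-byte lines (callers mask the address with
+ * ~0xf) and callers enforce 8-byte alignment, so (off & 0xf) is either 0
+ * or 8; the LO/HI and ULO/UHI data registers swap roles depending on
+ * which 64-bit half of the line is addressed.
+ */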
+ if (off & 0xf) {
+ ms->wd[0] = QLCNIC_MS_WRTDATA_LO;
+ ms->rd[0] = QLCNIC_MS_RDDATA_LO;
+ ms->wd[1] = QLCNIC_MS_WRTDATA_HI;
+ ms->rd[1] = QLCNIC_MS_RDDATA_HI;
+ ms->wd[2] = QLCNIC_MS_WRTDATA_ULO;
+ ms->wd[3] = QLCNIC_MS_WRTDATA_UHI;
+ ms->rd[2] = QLCNIC_MS_RDDATA_ULO;
+ ms->rd[3] = QLCNIC_MS_RDDATA_UHI;
+ } else {
+ ms->wd[0] = QLCNIC_MS_WRTDATA_ULO;
+ ms->rd[0] = QLCNIC_MS_RDDATA_ULO;
+ ms->wd[1] = QLCNIC_MS_WRTDATA_UHI;
+ ms->rd[1] = QLCNIC_MS_RDDATA_UHI;
+ ms->wd[2] = QLCNIC_MS_WRTDATA_LO;
+ ms->wd[3] = QLCNIC_MS_WRTDATA_HI;
+ ms->rd[2] = QLCNIC_MS_RDDATA_LO;
+ ms->rd[3] = QLCNIC_MS_RDDATA_HI;
+ }
+
+ ms->ocm_window = OCM_WIN_P3P(off);
+ ms->off = GET_MEM_OFFS_2M(off);
+}
+
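+/*
+ * Write one 64-bit word of QDR/DDR adapter memory through the test agent.
+ * Addresses in the OCM0 range take the direct-access path; everything else
+ * is handled as a 16-byte read-modify-write in which only the addressed
+ * 64-bit half of the line is replaced.
+ */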
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
+{
+ int j, ret = 0;
+ u32 temp, off8;
+ struct qlcnic_ms_reg_ctrl ms;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+ if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX) ||
+ ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))
+ return -EIO;
+
+ qlcnic_set_ms_controls(adapter, off, &ms);
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+ ms.off, &data, 1);
+
+ off8 = off & ~0xf;
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ qlcnic_ind_wr(adapter, ms.low, off8);
+ qlcnic_ind_wr(adapter, ms.hi, 0);
+
+ qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, ms.control);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
+ /* This is the modify part of read-modify-write */
+ qlcnic_ind_wr(adapter, ms.wd[0], qlcnic_ind_rd(adapter, ms.rd[0]));
+ qlcnic_ind_wr(adapter, ms.wd[1], qlcnic_ind_rd(adapter, ms.rd[1]));
+ /* This is the write part of read-modify-write */
+ qlcnic_ind_wr(adapter, ms.wd[2], data & 0xffffffff);
+ qlcnic_ind_wr(adapter, ms.wd[3], (data >> 32) & 0xffffffff);
+
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_WRITE_START);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, ms.control);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val;
+ struct qlcnic_ms_reg_ctrl ms;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+ if (!(ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX) ||
+ ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET,
+ QLCNIC_ADDR_DDR_NET_MAX)))
+ return -EIO;
+
+ memset(&ms, 0, sizeof(struct qlcnic_ms_reg_ctrl));
+ qlcnic_set_ms_controls(adapter, off, &ms);
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, ms.ocm_window,
+ ms.off, data, 0);
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ off8 = off & ~0xf;
+
+ qlcnic_ind_wr(adapter, ms.low, off8);
+ qlcnic_ind_wr(adapter, ms.hi, 0);
+
+ qlcnic_ind_wr(adapter, ms.control, TA_CTL_ENABLE);
+ qlcnic_ind_wr(adapter, ms.control, QLCNIC_TA_START_ENABLE);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = qlcnic_ind_rd(adapter, ms.control);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ temp = qlcnic_ind_rd(adapter, ms.rd[3]);
+ val = (u64)temp << 32;
+ val |= qlcnic_ind_rd(adapter, ms.rd[2]);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw->mem_lock);
+
+ return ret;
+}
+
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic, err = 0;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw->board_type = board_type;
+
+ if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+ if (err == -EIO)
+ return err;
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3P_HMEZ:
+ case QLCNIC_BRDTYPE_P3P_XG_LOM:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4:
+ case QLCNIC_BRDTYPE_P3P_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3P_IMEZ:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3P_10G_XFP:
+ case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_REF_QG:
+ case QLCNIC_BRDTYPE_P3P_4_GB:
+ case QLCNIC_BRDTYPE_P3P_4_GB_MM:
+ adapter->ahw->port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3P_10G_TP:
+ adapter->ahw->port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw->port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+ int err = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(((u64)rate << 32) | adapter->portnum);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_cmd_args cmd;
+ u8 beacon_state;
+ int err = 0;
+
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_GET_LED_STATUS);
+ if (!err) {
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state, err=%d\n",
+ err);
+ } else {
+ beacon_state = cmd.rsp.arg[1];
+ if (beacon_state == QLCNIC_BEACON_DISABLE)
+ ahw->beacon_state = QLCNIC_BEACON_OFF;
+ else if (beacon_state == QLCNIC_BEACON_EANBLE)
+ ahw->beacon_state = QLCNIC_BEACON_ON;
+ }
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+}
+
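+/*
+ * Derive this function's PCI function number from the MSI-X table offset
+ * read from config space, relative to the MSI-X base read through BAR0
+ * (one QLCNIC_MSIX_TBL_PGSIZE page per function).
+ */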
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
+{
+ void __iomem *msix_base_addr;
+ u32 func;
+ u32 msix_base;
+
+ pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base = readl(msix_base_addr);
+ func = (func - msix_base) / QLCNIC_MSIX_TBL_PGSIZE;
+ adapter->ahw->pci_func = func;
+}
+
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ int err = 0;
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
+ memcpy(buf, &qmdata, size);
+ } else {
+ data = QLCRD32(adapter, offset, &err);
+ memcpy(buf, &data, size);
+ }
+}
+
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *adapter, char *buf,
+ loff_t offset, size_t size)
+{
+ u32 data;
+ u64 qmdata;
+
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
+ memcpy(&qmdata, buf, size);
+ qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
+ } else {
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ }
+}
+
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *adapter)
+{
+ return qlcnic_pcie_sem_lock(adapter, 5, 0);
+}
+
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_pcie_sem_unlock(adapter, 5);
+}
+
+int qlcnic_82xx_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ if (qlcnic_wol_supported(adapter))
+ device_wakeup_enable(&pdev->dev);
+
+ return 0;
+}
+
+int qlcnic_82xx_resume(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (!err)
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
new file mode 100644
index 000000000..95ecc84dd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -0,0 +1,225 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#ifndef __QLCNIC_HW_H
+#define __QLCNIC_HW_H
+
+/* Common registers in 83xx and 82xx */
+enum qlcnic_regs {
+ QLCNIC_PEG_HALT_STATUS1 = 0,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_FLASH_LOCK_OWNER,
+ QLCNIC_FW_CAPABILITIES,
+ QLCNIC_CRB_DRV_ACTIVE,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_FW_VERSION_MAJOR,
+ QLCNIC_FW_VERSION_MINOR,
+ QLCNIC_FW_VERSION_SUB,
+ QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_FW_IMG_VALID,
+ QLCNIC_CMDPEG_STATE,
+ QLCNIC_RCVPEG_STATE,
+ QLCNIC_ASIC_TEMP,
+ QLCNIC_FW_API,
+ QLCNIC_DRV_OP_MODE,
+ QLCNIC_FLASH_LOCK,
+ QLCNIC_FLASH_UNLOCK,
+};
+
+/* Read from an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_RD32(a, addr) \
+ readl(((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Write to an address offset from BAR0, existing registers */
+#define QLC_SHARED_REG_WR32(a, addr, value) \
+ writel(value, ((a)->ahw->pci_base0) + ((a)->ahw->reg_tbl[addr]))
+
+/* Read from a direct address offset from BAR0, additional registers */
+#define QLCRDX(ahw, addr) \
+ readl(((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr]))
+
+/* Write to a direct address offset from BAR0, additional registers */
+#define QLCWRX(ahw, addr, value) \
+ writel(value, (((ahw)->pci_base0) + ((ahw)->ext_reg_tbl[addr])))
+
+#define QLCNIC_CMD_CONFIGURE_IP_ADDR 0x1
+#define QLCNIC_CMD_CONFIG_INTRPT 0x2
+#define QLCNIC_CMD_CREATE_RX_CTX 0x7
+#define QLCNIC_CMD_DESTROY_RX_CTX 0x8
+#define QLCNIC_CMD_CREATE_TX_CTX 0x9
+#define QLCNIC_CMD_DESTROY_TX_CTX 0xa
+#define QLCNIC_CMD_CONFIGURE_LRO 0xC
+#define QLCNIC_CMD_CONFIGURE_MAC_LEARNING 0xD
+#define QLCNIC_CMD_GET_STATISTICS 0xF
+#define QLCNIC_CMD_INTRPT_TEST 0x11
+#define QLCNIC_CMD_SET_MTU 0x12
+#define QLCNIC_CMD_READ_PHY 0x13
+#define QLCNIC_CMD_WRITE_PHY 0x14
+#define QLCNIC_CMD_READ_HW_REG 0x15
+#define QLCNIC_CMD_GET_FLOW_CTL 0x16
+#define QLCNIC_CMD_SET_FLOW_CTL 0x17
+#define QLCNIC_CMD_READ_MAX_MTU 0x18
+#define QLCNIC_CMD_READ_MAX_LRO 0x19
+#define QLCNIC_CMD_MAC_ADDRESS 0x1f
+#define QLCNIC_CMD_GET_PCI_INFO 0x20
+#define QLCNIC_CMD_GET_NIC_INFO 0x21
+#define QLCNIC_CMD_SET_NIC_INFO 0x22
+#define QLCNIC_CMD_GET_ESWITCH_CAPABILITY 0x24
+#define QLCNIC_CMD_TOGGLE_ESWITCH 0x25
+#define QLCNIC_CMD_GET_ESWITCH_STATUS 0x26
+#define QLCNIC_CMD_SET_PORTMIRRORING 0x27
+#define QLCNIC_CMD_CONFIGURE_ESWITCH 0x28
+#define QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG 0x29
+#define QLCNIC_CMD_GET_ESWITCH_STATS 0x2a
+#define QLCNIC_CMD_CONFIG_PORT 0x2e
+#define QLCNIC_CMD_TEMP_SIZE 0x2f
+#define QLCNIC_CMD_GET_TEMP_HDR 0x30
+#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
+#define QLCNIC_CMD_CONFIG_VPORT 0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
+#define QLCNIC_CMD_GET_MAC_STATS 0x37
+#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
+#define QLCNIC_CMD_GET_LED_STATUS 0x3C
+#define QLCNIC_CMD_CONFIGURE_RSS 0x41
+#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
+#define QLCNIC_CMD_CONFIGURE_LED 0x44
+#define QLCNIC_CMD_CONFIG_MAC_VLAN 0x45
+#define QLCNIC_CMD_GET_LINK_EVENT 0x48
+#define QLCNIC_CMD_CONFIGURE_MAC_RX_MODE 0x49
+#define QLCNIC_CMD_CONFIGURE_HW_LRO 0x4A
+#define QLCNIC_CMD_SET_INGRESS_ENCAP 0x4E
+#define QLCNIC_CMD_INIT_NIC_FUNC 0x60
+#define QLCNIC_CMD_STOP_NIC_FUNC 0x61
+#define QLCNIC_CMD_IDC_ACK 0x63
+#define QLCNIC_CMD_SET_PORT_CONFIG 0x66
+#define QLCNIC_CMD_GET_PORT_CONFIG 0x67
+#define QLCNIC_CMD_GET_LINK_STATUS 0x68
+#define QLCNIC_CMD_SET_LED_CONFIG 0x69
+#define QLCNIC_CMD_GET_LED_CONFIG 0x6A
+#define QLCNIC_CMD_83XX_SET_DRV_VER 0x6F
+#define QLCNIC_CMD_ADD_RCV_RINGS 0x0B
+#define QLCNIC_CMD_83XX_EXTEND_ISCSI_DUMP_CAP 0x37
+
+#define QLCNIC_INTRPT_INTX 1
+#define QLCNIC_INTRPT_MSIX 3
+#define QLCNIC_INTRPT_ADD 1
+#define QLCNIC_INTRPT_DEL 2
+
+#define QLCNIC_GET_CURRENT_MAC 1
+#define QLCNIC_SET_STATION_MAC 2
+#define QLCNIC_GET_DEFAULT_MAC 3
+#define QLCNIC_GET_FAC_DEF_MAC 4
+#define QLCNIC_SET_FAC_DEF_MAC 5
+
+#define QLCNIC_MBX_LINK_EVENT 0x8001
+#define QLCNIC_MBX_BC_EVENT 0x8002
+#define QLCNIC_MBX_COMP_EVENT 0x8100
+#define QLCNIC_MBX_REQUEST_EVENT 0x8101
+#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
+#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
+#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
+
+struct qlcnic_mailbox_metadata {
+ u32 cmd;
+ u32 in_args;
+ u32 out_args;
+};
+
+/* Mailbox ownership */
+#define QLCNIC_GET_OWNER(val) ((val) & (BIT_0 | BIT_1))
+
+#define QLCNIC_SET_OWNER 1
+#define QLCNIC_CLR_OWNER 0
+#define QLCNIC_MBX_TIMEOUT 5000
+
+#define QLCNIC_MBX_RSP_OK 1
+#define QLCNIC_MBX_PORT_RSP_OK 0x1a
+#define QLCNIC_MBX_ASYNC_EVENT BIT_15
+
+/* Set HW Tx ring limit for 82xx adapter. */
+#define QLCNIC_MAX_HW_TX_RINGS 8
+#define QLCNIC_MAX_HW_VNIC_TX_RINGS 4
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
+struct qlcnic_pci_info;
+struct qlcnic_info;
+struct qlcnic_cmd_args;
+struct ethtool_stats;
+struct pci_device_id;
+struct qlcnic_host_sds_ring;
+struct qlcnic_host_tx_ring;
+struct qlcnic_hardware_context;
+struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
+int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
+int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
+int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev);
+void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
+int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
+ struct ethtool_coalesce *);
+int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
+int qlcnic_82xx_config_rss(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_config_ipaddr(struct qlcnic_adapter *adapter,
+ __be32, int);
+int qlcnic_82xx_linkevent_request(struct qlcnic_adapter *adapter, int);
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8);
+int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
+void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
+int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
+int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *tx_ring, int);
+void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
+void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
+int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
+int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
+int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
+int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *,
+ struct qlcnic_adapter *, u32);
+int qlcnic_82xx_get_board_info(struct qlcnic_adapter *);
+int qlcnic_82xx_config_led(struct qlcnic_adapter *, u32, u32);
+void qlcnic_82xx_get_func_no(struct qlcnic_adapter *);
+int qlcnic_82xx_api_lock(struct qlcnic_adapter *);
+void qlcnic_82xx_api_unlock(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *);
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *);
+int qlcnic_82xx_shutdown(struct pci_dev *);
+int qlcnic_82xx_resume(struct qlcnic_adapter *);
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
+void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
+#endif /* __QLCNIC_HW_H */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
new file mode 100644
index 000000000..09f20c794
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -0,0 +1,1302 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
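+/*
+ * crb_addr_xform[] maps an internal CRB hub/agent id to the base of that
+ * agent's window in PCI CRB space (agent address shifted into bits 20+).
+ */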
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static int qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->skb == NULL)
+ continue;
+
+ dma_unmap_single(&adapter->pdev->dev, rx_buf->dma,
+ rds_ring->dma_size, DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf++;
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+
+ spin_lock(&tx_ring->tx_clean_lock);
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ dma_unmap_single(&adapter->pdev->dev, buffrag->dma,
+ buffrag->length, DMA_TO_DEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 1; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ dma_unmap_page(&adapter->pdev->dev,
+ buffrag->dma, buffrag->length,
+ DMA_TO_DEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+
+ spin_unlock(&tx_ring->tx_clean_lock);
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int ring;
+
+ recv_ctx = adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ return;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i;
+
+ recv_ctx = adapter->recv_ctx;
+
+ rds_ring = kcalloc(adapter->max_rds_rings,
+ sizeof(struct qlcnic_host_rds_ring), GFP_KERNEL);
+ if (rds_ring == NULL)
+ goto err_out;
+
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->ahw->capabilities &
+ QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+ }
+ rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL)
+ goto err_out;
+
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+ int err = 0;
+
+ cond_resched();
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ u32 addr, u32 *valp)
+{
+ int err = 0;
+
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+ if (err == -EIO)
+ return err;
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
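+/*
+ * Bring the hardware to a known state using the CRB init list in flash:
+ * quiesce I2Q, NIU, SRE, EPG, the timers and the PEG processors, replay the
+ * (addr, data) pairs stored in ROM while skipping registers that must
+ * survive reset, then restart the protocol processing engines.
+ */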
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, err = 0;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off, val;
+ struct pci_dev *pdev = adapter->pdev;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_RCVPEG_STATE, 0);
+
+ /* Halt all the individual PEGs and other blocks */
+ /* disable all I2Q */
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x10, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x14, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x18, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x1c, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x20, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_I2Q + 0x24, 0x0);
+
+ /* disable all niu interrupts */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x40, 0xff);
+ /* disable xge rx/tx */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x70000, 0x00);
+ /* disable xg1 rx/tx */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x80000, 0x00);
+ /* disable sideband mac */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0x90000, 0x00);
+ /* disable ap0 mac */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xa0000, 0x00);
+ /* disable ap1 mac */
+ QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
+
+ /* halt sre */
+ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+ if (err == -EIO)
+ return err;
+ QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
+
+ /* halt epg */
+ QLCWR32(adapter, QLCNIC_CRB_EPG + 0x1300, 0x1);
+
+ /* halt timers */
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x0, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x8, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x10, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x18, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x100, 0x0);
+ QLCWR32(adapter, QLCNIC_CRB_TIMER + 0x200, 0x0);
+ /* halt pegs */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, 1);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, 1);
+ msleep(20);
+
+ /* big hammer don't reset CAM block on reset */
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
+
+ /* Init HW CRB block */
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+ /*
+ * After writing this register, HW needs time for the CRB
+ * to quiet down (else crb_window returns 0xffffffff).
+ */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* Initialize protocol process engine */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
+ usleep_range(1000, 1500);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+ return 0;
+}
+
+static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(QLCNIC_CMDPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "Command Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
+
+ do {
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(QLCNIC_RCVPEG_CHECK_DELAY);
+
+ } while (--retries);
+
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not complete, state: 0x%x.\n",
+ val);
+ return -EIO;
+}
+
+int
+qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_cmd_peg_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
+int
+qlcnic_setup_idc_param(struct qlcnic_adapter *adapter)
+{
+ int timeo;
+ u32 val;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
+ val = QLC_DEV_GET_DRV(val, adapter->portnum);
+ if ((val & 0x3) != QLCNIC_TYPE_NIC) {
+ dev_err(&adapter->pdev->dev,
+ "Not an Ethernet NIC func=%u\n", val);
+ return -EIO;
+ }
+ adapter->ahw->physical_port = (val >> 2);
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
+ timeo = QLCNIC_INIT_TIMEOUT_SECS;
+
+ adapter->dev_init_timeo = timeo;
+
+ if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
+ timeo = QLCNIC_RESET_TIMEOUT_SECS;
+
+ adapter->reset_ack_timeo = timeo;
+
+ return 0;
+}
+
+static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
+ struct qlcnic_flt_entry *region_entry)
+{
+ struct qlcnic_flt_header flt_hdr;
+ struct qlcnic_flt_entry *flt_entry;
+ int i = 0, ret;
+ u32 entry_size;
+
+ memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
+ (u8 *)&flt_hdr,
+ sizeof(struct qlcnic_flt_header));
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout header\n");
+ return -EIO;
+ }
+
+ entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
+ flt_entry = vzalloc(entry_size);
+ if (flt_entry == NULL)
+ return -EIO;
+
+ ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
+ sizeof(struct qlcnic_flt_header),
+ (u8 *)flt_entry, entry_size);
+ if (ret) {
+ dev_warn(&adapter->pdev->dev,
+ "error reading flash layout entries\n");
+ goto err_out;
+ }
+
+ while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ if (flt_entry[i].region == region)
+ break;
+ i++;
+ }
+ if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
+ dev_warn(&adapter->pdev->dev,
+ "region=%x not found in %d regions\n", region, i);
+ ret = -EIO;
+ goto err_out;
+ }
+ memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
+
+err_out:
+ vfree(flt_entry);
+ return ret;
+}
+
+int
+qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_flt_entry fw_entry;
+ u32 ver = -1, min_ver;
+ int ret;
+
+ if (adapter->ahw->revision_id == QLCNIC_P3P_C0)
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_C0_FW_IMAGE_REGION,
+ &fw_entry);
+ else
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_B0_FW_IMAGE_REGION,
+ &fw_entry);
+
+ if (!ret)
+ /* bytes 0-3: signature, bytes 4-7: firmware version */
+ qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
+ (int *)&ver);
+ else
+ qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
+ (int *)&ver);
+
+ ver = QLCNIC_DECODE_VERSION(ver);
+ min_ver = QLCNIC_MIN_FW_VERSION;
+
+ if (ver < min_ver) {
+ dev_err(&adapter->pdev->dev,
+ "firmware version %d.%d.%d unsupported."
+ "Min supported version %d.%d.%d\n",
+ _major(ver), _minor(ver), _build(ver),
+ _major(min_ver), _minor(min_ver), _build(min_ver));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability = 0;
+ int err = 0;
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+ if (err == -EIO)
+ return err;
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+
+ return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i, entries;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ entries = le32_to_cpu(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ u32 offs = le32_to_cpu(directory->findex) +
+ i * le32_to_cpu(directory->entry_size);
+ u32 tab_type = le32_to_cpu(*((__le32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
+#define FILEHEADER_SIZE (14 * 4)
+
+static int
+qlcnic_validate_header(struct qlcnic_adapter *adapter)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ u32 entries, entry_size, tab_size, fw_file_size;
+
+ fw_file_size = adapter->fw->size;
+
+ if (fw_file_size < FILEHEADER_SIZE)
+ return -EINVAL;
+
+ entries = le32_to_cpu(directory->num_entries);
+ entry_size = le32_to_cpu(directory->entry_size);
+ tab_size = le32_to_cpu(directory->findex) + (entries * entry_size);
+
+ if (fw_file_size < tab_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ u32 offs, tab_size, data_size, idx;
+ const u8 *unirom = adapter->fw->data;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_BOOTLD_IDX_OFF);
+ idx = le32_to_cpu(temp);
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * (idx + 1);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
+ descr = (struct uni_data_desc *)&unirom[offs];
+
+ data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_fw(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *tab_desc;
+ struct uni_data_desc *descr;
+ const u8 *unirom = adapter->fw->data;
+ u32 offs, tab_size, data_size, idx;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] +
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ idx = le32_to_cpu(temp);
+ tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
+
+ if (!tab_desc)
+ return -EINVAL;
+
+ tab_size = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * (idx + 1);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
+ descr = (struct uni_data_desc *)&unirom[offs];
+ data_size = le32_to_cpu(descr->findex) + le32_to_cpu(descr->size);
+
+ if (adapter->fw->size < data_size)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ int mn_present = qlcnic_has_mn(adapter);
+ u32 entries, entry_size, tab_size, i;
+ __le32 temp;
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (!ptab_descr)
+ return -EINVAL;
+
+ entries = le32_to_cpu(ptab_descr->num_entries);
+ entry_size = le32_to_cpu(ptab_descr->entry_size);
+ tab_size = le32_to_cpu(ptab_descr->findex) + (entries * entry_size);
+
+ if (adapter->fw->size < tab_size)
+ return -EINVAL;
+
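+ /*
+ * Look for a product table entry matching this chip revision. Flag bit 1
+ * selects images for boards with MN present (QLCNIC_PEG_TUNE_MN_PRESENT),
+ * bit 2 images without; if no MN image matches, retry without MN.
+ */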
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ u32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw->revision_id;
+ u32 flagbit;
+
+ offs = le32_to_cpu(ptab_descr->findex) +
+ i * le32_to_cpu(ptab_descr->entry_size);
+ temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_FLAGS_OFF);
+ flags = le32_to_cpu(temp);
+ temp = *((__le32 *)&unirom[offs] + QLCNIC_UNI_CHIP_REV_OFF);
+ file_chiprev = le32_to_cpu(temp);
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -EINVAL;
+}
+
+static int
+qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_validate_header(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: header validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_product_offs(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: product validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_bootld(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: bootld validation failed\n");
+ return -EINVAL;
+ }
+
+ if (qlcnic_validate_fw(adapter)) {
+ dev_err(&adapter->pdev->dev,
+ "unified image: firmware validation failed\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ struct uni_table_desc *tab_desc;
+ u32 offs, idx;
+ __le32 temp;
+
+ temp = *((__le32 *)&unirom[adapter->file_prd_off] + idx_offset);
+ idx = le32_to_cpu(temp);
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = le32_to_cpu(tab_desc->findex) +
+ le32_to_cpu(tab_desc->entry_size) * idx;
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+ struct uni_data_desc *data_desc;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF);
+
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = le32_to_cpu(data_desc->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+ struct uni_data_desc *data_desc;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = le32_to_cpu(data_desc->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u32 qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *data_desc;
+ const u8 *unirom = adapter->fw->data;
+
+ data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+
+ if (adapter->ahw->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return le32_to_cpu(data_desc->size);
+ else
+ return le32_to_cpu(*(__le32 *)&unirom[QLCNIC_FW_SIZE_OFFSET]);
+}
+
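+/*
+ * Firmware version: flash images keep it at QLCNIC_FW_VERSION_OFFSET, while
+ * unified ROM images carry a trailing "REV=major.minor.sub" string near the
+ * end of the firmware section that is parsed here.
+ */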
+static u32 qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ u32 major, minor, sub;
+ __le32 version_offset;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+ version_offset = *(__le32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET];
+ return le32_to_cpu(version_offset);
+ }
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + le32_to_cpu(fw_data_desc->findex) +
+ le32_to_cpu(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
+
+static u32 qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ u32 bios_ver, prd_off = adapter->file_prd_off;
+ u8 *version_offset;
+ __le32 temp;
+
+ if (adapter->ahw->fw_type != QLCNIC_UNIFIED_ROMIMAGE) {
+ version_offset = (u8 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET];
+ return le32_to_cpu(*(__le32 *)version_offset);
+ }
+
+ temp = *((__le32 *)(&fw->data[prd_off]) + QLCNIC_UNI_BIOS_VERSION_OFF);
+ bios_ver = le32_to_cpu(temp);
+
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
+static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
+ dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
+
+ qlcnic_pcie_sem_unlock(adapter, 2);
+}
+
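+/*
+ * Poll the firmware alive counter; if it does not advance within the retry
+ * window, the firmware is considered hung.
+ */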
+static int
+qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
+{
+ u32 heartbeat, ret = -EIO;
+ int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
+
+ adapter->heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
+
+ do {
+ msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
+ heartbeat = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ ret = QLCNIC_RCODE_SUCCESS;
+ break;
+ }
+ } while (--retries);
+
+ return ret;
+}
+
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ if ((adapter->flags & QLCNIC_FW_HANG) ||
+ qlcnic_check_fw_heartbeat(adapter)) {
+ qlcnic_rom_lock_recovery(adapter);
+ return 1;
+ }
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ if (adapter->fw)
+ return 1;
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
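+/*
+ * Copy the bootloader and firmware image into adapter memory 64 bits at a
+ * time, either from the previously requested firmware file or, if none is
+ * loaded, straight from flash, then write the PEG0 control and global SW
+ * reset registers (presumably to start the new image).
+ */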
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ __le64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->ahw->fw_type]);
+
+ if (fw) {
+ u64 data;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (__le64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = le64_to_cpu(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (__le64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = le64_to_cpu(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = qlcnic_get_fw_size(adapter) % 8;
+ if (size) {
+ data = le64_to_cpu(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+ }
+
+ } else {
+ u64 data;
+ u32 hi, lo;
+ int ret;
+ struct qlcnic_flt_entry bootld_entry;
+
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
+ &bootld_entry);
+ if (!ret) {
+ size = bootld_entry.size / 8;
+ flashaddr = bootld_entry.start_addr;
+ } else {
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+ dev_info(&pdev->dev,
+ "using legacy method to get flash fw region");
+ }
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ usleep_range(1000, 1500);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ u32 ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->ahw->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_validate_unified_romimage(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = le32_to_cpu(*(__le32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if (val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if (ver < QLCNIC_MIN_FW_VERSION) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if (val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID, QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->ahw->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->ahw->fw_type = fw_type;
+}
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->ahw->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->ahw->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->ahw->fw_type],
+ &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ usleep_range(1000, 1500);
+ goto next;
+ }
+ }
+}
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
new file mode 100644
index 000000000..41894d154
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -0,0 +1,2227 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <linux/printk.h>
+#include <linux/jiffies.h>
+
+#include "qlcnic.h"
+
+#define QLCNIC_TX_ETHER_PKT 0x01
+#define QLCNIC_TX_TCP_PKT 0x02
+#define QLCNIC_TX_UDP_PKT 0x03
+#define QLCNIC_TX_IP_PKT 0x04
+#define QLCNIC_TX_TCP_LSO 0x05
+#define QLCNIC_TX_TCP_LSO6 0x06
+#define QLCNIC_TX_ENCAP_PKT 0x07
+#define QLCNIC_TX_ENCAP_LSO 0x08
+#define QLCNIC_TX_TCPV6_PKT 0x0b
+#define QLCNIC_TX_UDPV6_PKT 0x0c
+
+#define QLCNIC_FLAGS_VLAN_TAGGED 0x10
+#define QLCNIC_FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode |= \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x07FFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+#define qlcnic_get_lro_sts_mss(sts_data1) \
+ ((sts_data1 >> 32) & 0x0FFFF)
+
+#define qlcnic_83xx_get_lro_sts_mss(sts) ((sts) & 0xffff)
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+#define QLCNIC_TCP_HDR_SIZE 20
+#define QLCNIC_TCP_TS_OPTION_SIZE 12
+#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
+#define QLCNIC_DESC_OWNER_FW cpu_to_le64(STATUS_OWNER_PHANTOM)
+
+#define QLCNIC_TCP_TS_HDR_SIZE (QLCNIC_TCP_HDR_SIZE + QLCNIC_TCP_TS_OPTION_SIZE)
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_LOOP 0
+#define STATUS_CKSUM_OK 2
+
+#define qlcnic_83xx_pktln(sts) ((sts >> 32) & 0x3FFF)
+#define qlcnic_83xx_hndl(sts) ((sts >> 48) & 0x7FFF)
+#define qlcnic_83xx_csum_status(sts) ((sts >> 39) & 7)
+#define qlcnic_83xx_opcode(sts) ((sts >> 42) & 0xF)
+#define qlcnic_83xx_vlan_tag(sts) (((sts) >> 48) & 0xFFFF)
+#define qlcnic_83xx_lro_pktln(sts) (((sts) >> 32) & 0x3FFF)
+#define qlcnic_83xx_l2_hdr_off(sts) (((sts) >> 16) & 0xFF)
+#define qlcnic_83xx_l4_hdr_off(sts) (((sts) >> 24) & 0xFF)
+#define qlcnic_83xx_pkt_cnt(sts) (((sts) >> 16) & 0x7)
+#define qlcnic_83xx_is_tstamp(sts) (((sts) >> 40) & 1)
+#define qlcnic_83xx_is_psh_bit(sts) (((sts) >> 41) & 1)
+#define qlcnic_83xx_is_ip_align(sts) (((sts) >> 46) & 1)
+#define qlcnic_83xx_has_vlan_tag(sts) (((sts) >> 47) & 1)
+
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max);
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
+ struct qlcnic_host_rds_ring *,
+ u16, u16);
+
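+/* Hash a MAC/VLAN pair into a filter bucket index by XORing the first and
+ * last octets of the address with the low byte of the VLAN ID.
+ */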
+static inline u8 qlcnic_mac_hash(u64 mac, u16 vlan)
+{
+ return (u8)((mac & 0xff) ^ ((mac >> 40) & 0xff) ^ (vlan & 0xff));
+}
+
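+/* On 83xx adapters the receive ring id is folded into the buffer reference
+ * handle at bit 15; 82xx uses the handle unchanged.
+ */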
+static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
+ u16 handle, u8 ring_id)
+{
+ if (qlcnic_83xx_check(adapter))
+ return handle | (ring_id << 15);
+ else
+ return handle;
+}
+
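+/* On 82xx, a checksum status of STATUS_CKSUM_LOOP marks a frame that was
+ * looped back by the adapter.
+ */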
+static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
+{
+ return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
+}
+
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_filter *fil,
+ void *addr, u16 vlan_id)
+{
+ int ret;
+ u8 op;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (ret)
+ return;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (!ret) {
+ hlist_del(&fil->fnode);
+ adapter->rx_fhash.fnum--;
+ }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+ void *addr, u16 vlan_id)
+{
+ struct qlcnic_filter *tmp_fil = NULL;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (ether_addr_equal(tmp_fil->faddr, addr) &&
+ tmp_fil->vlan_id == vlan_id)
+ return tmp_fil;
+ }
+
+ return NULL;
+}
+
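+/* Maintain the loopback MAC learning tables: record the source address of
+ * looped-back frames in rx_fhash, and when the same address shows up in
+ * regular traffic, remove any previously learned filter for it from the
+ * adapter and from the driver's tables.
+ */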
+static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, int loopback_pkt, u16 vlan_id)
+{
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_head *head;
+ unsigned long time;
+ u64 src_addr = 0;
+ u8 hindex, op;
+ int ret;
+
+ if (!qlcnic_sriov_pf_check(adapter) || (vlan_id == 0xffff))
+ vlan_id = 0;
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = qlcnic_mac_hash(src_addr, vlan_id) &
+ (adapter->fhash.fbucket_size - 1);
+
+ if (loopback_pkt) {
+ if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
+ return;
+
+ head = &(adapter->rx_fhash.fhead[hindex]);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ time = tmp_fil->ftime;
+ if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ fil->ftime = jiffies;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ fil->vlan_id = vlan_id;
+ spin_lock(&adapter->rx_mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->rx_fhash.fnum++;
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ } else {
+ head = &adapter->fhash.fhead[hindex];
+
+ spin_lock(&adapter->mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter,
+ (u8 *)&src_addr,
+ vlan_id, op);
+ if (!ret) {
+ hlist_del(&tmp_fil->fnode);
+ adapter->fhash.fnum--;
+ }
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ return;
+ }
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ head = &adapter->rx_fhash.fhead[hindex];
+
+ spin_lock(&adapter->rx_mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil)
+ qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+ vlan_id);
+
+ spin_unlock(&adapter->rx_mac_learn_lock);
+ }
+}
+
+void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct cmd_desc_type0 *hwdesc;
+ struct qlcnic_nic_req *req;
+ struct qlcnic_mac_req *mac_req;
+ struct qlcnic_vlan_req *vlan_req;
+ u32 producer;
+ u64 word;
+
+ producer = tx_ring->producer;
+ hwdesc = &tx_ring->desc_head[tx_ring->producer];
+
+ req = (struct qlcnic_nic_req *)hwdesc;
+ memset(req, 0, sizeof(struct qlcnic_nic_req));
+ req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
+ req->req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
+ mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
+
+ vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
+ vlan_req->vlan_id = cpu_to_le16(vlan_id);
+
+ tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+ smp_mb();
+}
+
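+/* Learn the source MAC/VLAN of an outgoing frame: program a filter into the
+ * adapter and cache it in fhash, re-programming entries that have aged
+ * beyond QLCNIC_READD_AGE seconds.
+ */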
+static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
+ struct ethhdr *phdr = (struct ethhdr *)(skb->data);
+ u16 protocol = ntohs(skb->protocol);
+ struct qlcnic_filter *fil, *tmp_fil;
+ struct hlist_head *head;
+ struct hlist_node *n;
+ u64 src_addr = 0;
+ u16 vlan_id = 0;
+ u8 hindex, hval;
+
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
+ if (protocol == ETH_P_8021Q) {
+ vh = skb_vlan_eth_hdr(skb);
+ vlan_id = ntohs(vh->h_vlan_TCI);
+ } else if (skb_vlan_tag_present(skb)) {
+ vlan_id = skb_vlan_tag_get(skb);
+ }
+ }
+
+ memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hval = qlcnic_mac_hash(src_addr, vlan_id);
+ hindex = hval & (adapter->fhash.fbucket_size - 1);
+ head = &(adapter->fhash.fhead[hindex]);
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (ether_addr_equal(tmp_fil->faddr, (u8 *)&src_addr) &&
+ tmp_fil->vlan_id == vlan_id) {
+ if (time_is_before_jiffies(QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
+ qlcnic_change_filter(adapter, &src_addr,
+ vlan_id, tx_ring);
+ tmp_fil->ftime = jiffies;
+ return;
+ }
+ }
+
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
+ fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
+ if (!fil)
+ return;
+
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
+ fil->ftime = jiffies;
+ fil->vlan_id = vlan_id;
+ memcpy(fil->faddr, &src_addr, ETH_ALEN);
+ spin_lock(&adapter->mac_learn_lock);
+ hlist_add_head(&(fil->fnode), head);
+ adapter->fhash.fnum++;
+ spin_unlock(&adapter->mac_learn_lock);
+}
+
+#define QLCNIC_ENCAP_VXLAN_PKT BIT_0
+#define QLCNIC_ENCAP_OUTER_L3_IP6 BIT_1
+#define QLCNIC_ENCAP_INNER_L3_IP6 BIT_2
+#define QLCNIC_ENCAP_INNER_L4_UDP BIT_3
+#define QLCNIC_ENCAP_DO_L3_CSUM BIT_4
+#define QLCNIC_ENCAP_DO_L4_CSUM BIT_5
+
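+/* Build the Tx descriptor fields for an encapsulated (VXLAN) frame: for GSO,
+ * copy the inner and outer headers into the descriptor ring and set the
+ * encapsulation checksum bits; for checksum offload, flag an inner UDP L4
+ * protocol. The encapsulation offsets always go into encap_descr.
+ */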
+static int qlcnic_tx_encap_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 opcode = 0, inner_hdr_len = 0, outer_hdr_len = 0, total_hdr_len = 0;
+ int copied, copy_len, descr_size;
+ u32 producer = tx_ring->producer;
+ struct cmd_desc_type0 *hwdesc;
+ u16 flags = 0, encap_descr = 0;
+
+ opcode = QLCNIC_TX_ETHER_PKT;
+ encap_descr = QLCNIC_ENCAP_VXLAN_PKT;
+
+ if (skb_is_gso(skb)) {
+ inner_hdr_len = skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) -
+ skb_inner_mac_header(skb);
+
+ /* VXLAN header size = 8 */
+ outer_hdr_len = skb_transport_offset(skb) + 8 +
+ sizeof(struct udphdr);
+ first_desc->outer_hdr_length = outer_hdr_len;
+ total_hdr_len = inner_hdr_len + outer_hdr_len;
+ encap_descr |= QLCNIC_ENCAP_DO_L3_CSUM |
+ QLCNIC_ENCAP_DO_L4_CSUM;
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = inner_hdr_len;
+
+ /* Copy the inner and outer headers into the Tx descriptor(s).
+ * If total_hdr_len > sizeof(struct cmd_desc_type0), use multiple
+ * descriptors.
+ */
+ copied = 0;
+ descr_size = (int)sizeof(struct cmd_desc_type0);
+ while (copied < total_hdr_len) {
+ copy_len = min(descr_size, (total_hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc,
+ copy_len);
+ copied += copy_len;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+
+ /* Make sure the updated tx_ring->producer is visible
+ * to qlcnic_tx_avail()
+ */
+ smp_mb();
+ adapter->stats.encap_lso_frames++;
+
+ opcode = QLCNIC_TX_ENCAP_LSO;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (inner_ip_hdr(skb)->version == 6) {
+ if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ } else {
+ if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+ encap_descr |= QLCNIC_ENCAP_INNER_L4_UDP;
+ }
+
+ adapter->stats.encap_tx_csummed++;
+ opcode = QLCNIC_TX_ENCAP_PKT;
+ }
+
+ /* Prepare first 16 bits of byte offset 16 of Tx descriptor */
+ if (ip_hdr(skb)->version == 6)
+ encap_descr |= QLCNIC_ENCAP_OUTER_L3_IP6;
+
+ /* Outer IP header's size in 32-bit words */
+ encap_descr |= (skb_network_header_len(skb) >> 2) << 6;
+
+ /* outer IP header offset */
+ encap_descr |= skb_network_offset(skb) << 10;
+ first_desc->encap_descr = cpu_to_le16(encap_descr);
+
+ first_desc->tcp_hdr_offset = skb_inner_transport_header(skb) -
+ skb->data;
+ first_desc->ip_hdr_offset = skb_inner_network_offset(skb);
+
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
+static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ u8 l4proto, opcode = 0, hdr_len = 0, tag_vlan = 0;
+ u16 flags = 0, vlan_tci = 0;
+ int copied, offset, copy_len, size;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+ u16 protocol = ntohs(skb->protocol);
+ u32 producer = tx_ring->producer;
+
+ if (protocol == ETH_P_8021Q) {
+ vh = skb_vlan_eth_hdr(skb);
+ flags = QLCNIC_FLAGS_VLAN_TAGGED;
+ vlan_tci = ntohs(vh->h_vlan_TCI);
+ protocol = ntohs(vh->h_vlan_encapsulated_proto);
+ tag_vlan = 1;
+ } else if (skb_vlan_tag_present(skb)) {
+ flags = QLCNIC_FLAGS_VLAN_OOB;
+ vlan_tci = skb_vlan_tag_get(skb);
+ tag_vlan = 1;
+ }
+ if (unlikely(adapter->tx_pvid)) {
+ if (tag_vlan && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (tag_vlan && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = QLCNIC_FLAGS_VLAN_OOB;
+ vlan_tci = adapter->tx_pvid;
+ }
+set_flags:
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (*(skb->data) & BIT_0) {
+ flags |= BIT_0;
+ memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
+ }
+ opcode = QLCNIC_TX_ETHER_PKT;
+ if (skb_is_gso(skb)) {
+ hdr_len = skb_tcp_all_headers(skb);
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->hdr_length = hdr_len;
+ opcode = (protocol == ETH_P_IPV6) ? QLCNIC_TX_TCP_LSO6 :
+ QLCNIC_TX_TCP_LSO;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring.
+ */
+ copied = 0;
+ offset = 2;
+
+ if (flags & QLCNIC_FLAGS_VLAN_OOB) {
+ first_desc->hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+
+ /* Only in the case of TSO on a VLAN device */
+ flags |= QLCNIC_FLAGS_VLAN_TAGGED;
+
+ /* Create a TSO vlan header template for firmware */
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vlan_tci);
+
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16,
+ copy_len - 16);
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+ size = (int)sizeof(struct cmd_desc_type0) - offset;
+ copy_len = min(size, (hdr_len - copied));
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc +
+ offset, copy_len);
+ copied += copy_len;
+ offset = 0;
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ smp_mb();
+ adapter->stats.lso_frames++;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (protocol == ETH_P_IP) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = QLCNIC_TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = QLCNIC_TX_UDP_PKT;
+ } else if (protocol == ETH_P_IPV6) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = QLCNIC_TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = QLCNIC_TX_UDPV6_PKT;
+ }
+ }
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ return 0;
+}
+
+static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ skb_frag_t *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+ map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = skb_frag_size(frag);
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
+
+ for (i = 0; i < nr_frags; i++) {
+ nf = &pbuf->frag_array[i+1];
+ dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE);
+ pbuf->skb = NULL;
+}
+
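+/* Zero the descriptor qwords that must start out clear before a new Tx
+ * request is built in this slot.
+ */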
+static inline void qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+ desc[7] = 0ULL;
+}
+
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ struct ethhdr *phdr;
+ int i, k, frag_count, delta = 0;
+ u32 producer, num_txd;
+ u16 protocol;
+ bool l4_is_udp = false;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_tx_stop_all_queues(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (adapter->flags & QLCNIC_MACSPOOF) {
+ phdr = (struct ethhdr *)skb->data;
+ if (!ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ goto drop_packet;
+ }
+
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_frag_size(&skb_shinfo(skb)->frags[i]);
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
+
+ if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_tx_start_queue(tx_ring->txq);
+ } else {
+ tx_ring->tx_stats.xmit_off++;
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pdev = adapter->pdev;
+ first_desc = &tx_ring->desc_head[producer];
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
+ adapter->stats.tx_dma_map_error++;
+ goto drop_packet;
+ }
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* Move to the next descriptor */
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+ smp_mb();
+
+ protocol = ntohs(skb->protocol);
+ if (protocol == ETH_P_IP)
+ l4_is_udp = ip_hdr(skb)->protocol == IPPROTO_UDP;
+ else if (protocol == ETH_P_IPV6)
+ l4_is_udp = ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+
+ /* Check if it is a VXLAN packet */
+ if (!skb->encapsulation || !l4_is_udp ||
+ !qlcnic_encap_tx_offload(adapter)) {
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb,
+ tx_ring)))
+ goto unwind_buff;
+ } else {
+ if (unlikely(qlcnic_tx_encap_pkt(adapter, first_desc,
+ skb, tx_ring)))
+ goto unwind_buff;
+ }
+
+ if (adapter->drv_mac_learn)
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
+
+ tx_ring->tx_stats.tx_bytes += skb->len;
+ tx_ring->tx_stats.xmit_called++;
+
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
+ qlcnic_update_cmd_producer(tx_ring);
+
+ return NETDEV_TX_OK;
+
+unwind_buff:
+ qlcnic_unmap_buffers(pdev, skb, pbuf);
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw->linkup && !linkup) {
+ netdev_info(netdev, "NIC Link is down\n");
+ adapter->ahw->linkup = 0;
+ netif_carrier_off(netdev);
+ } else if (!adapter->ahw->linkup && linkup) {
+ adapter->ahw->linkup = 1;
+
+ /* Do not advertise Link up to the stack if device
+ * is in loopback mode
+ */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode) {
+ netdev_info(netdev, "NIC Link is up for loopback test\n");
+ return;
+ }
+
+ netdev_info(netdev, "NIC Link is up\n");
+ netif_carrier_on(netdev);
+ }
+}
+
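+/* Allocate a receive skb for the given RDS ring and DMA-map it for the
+ * device; on success the buffer's skb and dma fields are filled in.
+ */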
+static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ skb = netdev_alloc_skb(adapter->netdev, rds_ring->skb_size);
+ if (!skb) {
+ adapter->stats.skb_alloc_failure++;
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(&pdev->dev, dma)) {
+ adapter->stats.rx_dma_map_error++;
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+
+ return 0;
+}
+
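+/* Refill the receive ring from its free list, but only if the ring lock can
+ * be taken without contention; otherwise leave the refill to a later caller.
+ */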
+static void qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ u8 ring_id)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ uint32_t producer, handle;
+ struct list_head *head;
+
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ producer = rds_ring->producer;
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+ count++;
+ list_del(&buffer->list);
+
+ /* Build a receive descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ handle = qlcnic_get_ref_handle(adapter,
+ buffer->ref_handle, ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
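+/* Reclaim Tx descriptors consumed by the firmware: unmap the DMA buffers,
+ * free the skbs and wake the Tx queue once enough space is available.
+ * Returns non-zero when the ring is fully cleaned (or when another context
+ * already holds the clean lock).
+ */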
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring,
+ int budget)
+{
+ u32 sw_consumer, hw_consumer;
+ int i, done, count = 0;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ dma_unmap_single(&pdev->dev, frag->dma, frag->length,
+ DMA_TO_DEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ dma_unmap_page(&pdev->dev, frag->dma,
+ frag->length, DMA_TO_DEVICE);
+ frag->dma = 0ULL;
+ }
+ tx_ring->tx_stats.xmit_finished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= budget)
+ break;
+ }
+
+ tx_ring->sw_consumer = sw_consumer;
+
+ if (count && netif_running(netdev)) {
+ smp_mb();
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_tx_wake_queue(tx_ring->txq);
+ tx_ring->tx_stats.xmit_on++;
+ }
+ }
+ adapter->tx_timeo_cnt = 0;
+ }
+ /*
+ * If everything is freed up to the consumer, check whether the ring
+ * is full. If it is, more entries may still need to be freed, so
+ * schedule the callback again.
+ *
+ * This matters when two CPUs are involved: one could be freeing
+ * entries while the other fills the ring. If the ring is full when we
+ * leave here and the card has already interrupted the host, the host
+ * can miss that interrupt.
+ *
+ * There is still a possible race condition and the host could miss an
+ * interrupt. The card has to take care of this.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+
+ spin_unlock(&tx_ring->tx_clean_lock);
+
+ return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete, work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ tx_ring = sds_ring->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
+ budget);
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* qlcnic_process_cmd_ring() returned 0, so we need a repoll */
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if (work_done < budget) {
+ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static void qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len, link_speed;
+ u8 link_status, module, duplex, autoneg, lb_status = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->ahw->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+ lb_status = (msg->body[2] >> 32) & 0x3;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev,
+ "unsupported cable: OUI 0x%x, length %d\n",
+ cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ if (!link_status && (lb_status == QLCNIC_ILB_MODE ||
+ lb_status == QLCNIC_ELB_MODE))
+ adapter->ahw->loopback_state |= QLCNIC_LINKEVENT;
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->ahw->link_duplex = DUPLEX_FULL;
+ else
+ adapter->ahw->link_duplex = DUPLEX_HALF;
+
+ adapter->ahw->module_type = module;
+ adapter->ahw->link_autoneg = autoneg;
+
+ if (link_status) {
+ adapter->ahw->link_speed = link_speed;
+ } else {
+ adapter->ahw->link_speed = SPEED_UNKNOWN;
+ adapter->ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+}
+
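+/* Collect a (possibly multi-descriptor) firmware-to-host message from the
+ * status ring and dispatch it by opcode: link events, loopback configuration
+ * results and DCB AENs.
+ */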
+static void qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ struct qlcnic_adapter *adapter;
+ struct device *dev;
+ int i = 0, opcode, ret;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ adapter = sds_ring->adapter;
+ dev = &adapter->pdev->dev;
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(adapter, &msg);
+ break;
+ case QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK:
+ ret = (u32)(msg.body[1]);
+ switch (ret) {
+ case 0:
+ adapter->ahw->loopback_state |= QLCNIC_LB_RESPONSE;
+ break;
+ case 1:
+ dev_info(dev, "loopback already in progress\n");
+ adapter->ahw->diag_cnt = -EINPROGRESS;
+ break;
+ case 2:
+ dev_info(dev, "loopback cable is not connected\n");
+ adapter->ahw->diag_cnt = -ENODEV;
+ break;
+ default:
+ dev_info(dev,
+ "loopback configure request failed, err %x\n",
+ ret);
+ adapter->ahw->diag_cnt = -EIO;
+ break;
+ }
+ break;
+ case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+ qlcnic_dcb_aen_handler(adapter->dcb, (void *)&msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *ring,
+ u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &ring->rx_buf_arr[index];
+ if (unlikely(buffer->skb == NULL)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ dma_unmap_single(&adapter->pdev->dev, buffer->dma, ring->dma_size,
+ DMA_FROM_DEVICE);
+
+ skb = buffer->skb;
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+ (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ buffer->skb = NULL;
+
+ return skb;
+}
+
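+/* Strip the 802.1Q header from a received frame, if present, and check the
+ * tag against the configured Rx PVID. Returns -EINVAL when the tag does not
+ * match the PVID and tagging is not enabled, i.e. the frame must be dropped.
+ */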
+static inline int qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter,
+ struct sk_buff *skb, u16 *vlan_tag)
+{
+ struct ethhdr *eth_hdr;
+
+ if (!__vlan_get_tag(skb, vlan_tag)) {
+ eth_hdr = (struct ethhdr *)skb->data;
+ memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ }
+ if (!adapter->rx_pvid)
+ return 0;
+
+ if (*vlan_tag == adapter->rx_pvid) {
+ /* Outer vlan tag. Packet should follow non-vlan path */
+ *vlan_tag = 0xffff;
+ return 0;
+ }
+ if (adapter->flags & QLCNIC_TAGGING_ENABLED)
+ return 0;
+
+ return -EINVAL;
+}
+
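+/* Handle one completed receive descriptor: unmap the buffer, set the skb
+ * length and offset from the status descriptor, apply VLAN/PVID handling and
+ * pass the frame to GRO.
+ */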
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring, int ring,
+ u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset, is_lb_pkt;
+ u16 vid = 0xffff, t_vid;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (adapter->rx_mac_learn) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+ }
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
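+/* Hand a firmware-aggregated LRO frame to the stack: rebuild the IP total
+ * length, TCP sequence number and PSH flag so the aggregate looks like a
+ * single segment, and set the GSO size when the firmware reports an MSS.
+ */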
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int index, l2_hdr_offset, l4_hdr_offset, is_lb_pkt;
+ u16 lro_length, length, data_offset, t_vid, vid = 0xffff;
+ u32 seq_number;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (adapter->rx_mac_learn) {
+ t_vid = 0;
+ is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
+ }
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ }
+
+ th->psh = push;
+ th->seq = htonl(seq_number);
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ skb_shinfo(skb)->gso_size = qlcnic_get_lro_sts_mss(sts_data1);
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+
+ return buffer;
+}
+
+static int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ int opcode, desc_cnt, count = 0;
+ u64 sts_data0, sts_data1;
+ u8 ring;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring, ring,
+ sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, ring, sts_data0,
+ sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ goto skip;
+ default:
+ goto skip;
+ }
+ WARN_ON(desc_cnt > 1);
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = QLCNIC_DESC_OWNER_FW;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u8 ring_id)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int count = 0;
+ u32 producer, handle;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+ head = &rds_ring->free_list;
+
+ while (!list_empty(head)) {
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* Build a receive descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ handle = qlcnic_get_ref_handle(adapter, buffer->ref_handle,
+ ring_id);
+ pdesc->reference_handle = cpu_to_le16(handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
+static void dump_skb(struct sk_buff *skb, struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->msg_enable & NETIF_MSG_DRV) {
+ char prefix[30];
+
+ scnprintf(prefix, sizeof(prefix), "%s: %s: ",
+ dev_name(&adapter->pdev->dev), __func__);
+
+ print_hex_dump_debug(prefix, DUMP_PREFIX_NONE, 16, 1,
+ skb->data, skb->len, true);
+ }
+}
+
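+/* Receive path used by the loopback diagnostic test: count frames that match
+ * the expected loopback pattern instead of delivering them to the stack.
+ */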
+static void qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter, int ring,
+ u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ length = qlcnic_get_sts_totallength(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+}
+
+void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data0;
+ int ring, opcode, desc_cnt;
+
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+ switch (opcode) {
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
+ break;
+ default:
+ ring = qlcnic_get_sts_type(sts_data0);
+ qlcnic_process_rcv_diag(adapter, ring, sts_data0);
+ break;
+ }
+
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
+
+int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll);
+ } else {
+ if (ring == (adapter->drv_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll);
+ }
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add_tx(netdev, &tx_ring->napi,
+ qlcnic_tx_poll);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
+#define QLC_83XX_LRO_LB_PKT (1ULL << 46)
+
+static inline int qlcnic_83xx_is_lb_pkt(u64 sts_data, int lro_pkt)
+{
+ if (lro_pkt)
+ return (sts_data & QLC_83XX_LRO_LB_PKT) ? 1 : 0;
+ else
+ return (sts_data & QLC_83XX_NORMAL_LB_PKT) ? 1 : 0;
+}
+
+#define QLCNIC_ENCAP_LENGTH_MASK 0x7f
+
+static inline u8 qlcnic_encap_length(u64 sts_data)
+{
+ return sts_data & QLCNIC_ENCAP_LENGTH_MASK;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, is_lb_pkt;
+ u16 vid = 0xffff;
+ int err;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+ length = qlcnic_83xx_pktln(sts_data[0]);
+ cksum = qlcnic_83xx_csum_status(sts_data[1]);
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 0);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (qlcnic_encap_length(sts_data[1]) &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ skb->csum_level = 1;
+ adapter->stats.encap_rx_csummed++;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
+ u8 ring, u64 sts_data[])
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct tcphdr *th;
+ bool push;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index, is_lb_pkt;
+ u16 lro_length, length, data_offset, gso_size;
+ u16 vid = 0xffff;
+ int err;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ lro_length = qlcnic_83xx_lro_pktln(sts_data[0]);
+ l2_hdr_offset = qlcnic_83xx_l2_hdr_off(sts_data[1]);
+ l4_hdr_offset = qlcnic_83xx_l4_hdr_off(sts_data[1]);
+ push = qlcnic_83xx_is_psh_bit(sts_data[1]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (qlcnic_83xx_is_tstamp(sts_data[1]))
+ data_offset = l4_hdr_offset + QLCNIC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLCNIC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+ skb_pull(skb, l2_hdr_offset);
+
+ err = qlcnic_check_rx_tagging(adapter, skb, &vid);
+
+ if (adapter->rx_mac_learn) {
+ is_lb_pkt = qlcnic_83xx_is_lb_pkt(sts_data[1], 1);
+ qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, vid);
+ }
+
+ if (unlikely(err)) {
+ adapter->stats.rxdropped++;
+ dev_kfree_skb(skb);
+ return buffer;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (ntohs(skb->protocol) == ETH_P_IPV6) {
+ ipv6h = (struct ipv6hdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + sizeof(struct ipv6hdr));
+
+ length = (th->doff << 2) + lro_length;
+ ipv6h->payload_len = htons(length);
+ } else {
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ csum_replace2(&iph->check, iph->tot_len, htons(length));
+ iph->tot_len = htons(length);
+ }
+
+ th->psh = push;
+ length = skb->len;
+
+ if (adapter->flags & QLCNIC_FW_LRO_MSS_CAP) {
+ gso_size = qlcnic_83xx_get_lro_sts_mss(sts_data[0]);
+ skb_shinfo(skb)->gso_size = gso_size;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ }
+
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.lrobytes += length;
+ return buffer;
+}
+
+static int qlcnic_83xx_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring,
+ int max)
+{
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf = NULL;
+ u8 ring;
+ u64 sts_data[2];
+ int count = 0, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ break;
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+
+ switch (opcode) {
+ case QLC_83XX_REG_DESC:
+ rxbuf = qlcnic_83xx_process_rcv(adapter, sds_ring,
+ ring, sts_data);
+ break;
+ case QLC_83XX_LRO_DESC:
+ rxbuf = qlcnic_83xx_process_lro(adapter, ring,
+ sts_data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode: 0x%x\n", opcode);
+ goto skip;
+ }
+
+ if (likely(rxbuf))
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+ else
+ adapter->stats.null_rxbuf++;
+skip:
+ desc = &sds_ring->desc_head[consumer];
+ /* Reset the descriptor */
+ desc->status_desc_data[1] = 0;
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ count++;
+ }
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur, struct qlcnic_rx_buffer,
+ list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring, ring);
+ }
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+ return count;
+}
+
+static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(&sds_ring->napi, work_done);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
+{
+ int tx_complete;
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ /* tx ring count = 1 */
+ tx_ring = adapter->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+
+ /* Check if we need a repoll */
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(&sds_ring->napi, work_done);
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ } else {
+ /* need a repoll */
+ work_done = budget;
+ }
+
+ return work_done;
+}
+
+static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
+{
+ int work_done;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_adapter *adapter;
+
+ sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
+ adapter = sds_ring->adapter;
+ work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
+ if (work_done < budget) {
+ napi_complete_done(&sds_ring->napi, work_done);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ return work_done;
+}
+
+void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+}
+
+void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_intr(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
+}
+
+int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->drv_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_rx_poll);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_msix_sriov_vf_poll);
+
+ } else {
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_83xx_poll);
+ }
+ }
+
+ if (qlcnic_alloc_tx_rings(adapter, netdev)) {
+ qlcnic_free_sds_rings(recv_ctx);
+ return -ENOMEM;
+ }
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add_tx(netdev, &tx_ring->napi,
+ qlcnic_83xx_msix_tx_poll);
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
+ qlcnic_free_tx_rings(adapter);
+}
+
+static void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *adapter,
+ int ring, u64 sts_data[])
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+ index = qlcnic_83xx_hndl(sts_data[0]);
+ if (unlikely(index >= rds_ring->num_desc))
+ return;
+
+ length = qlcnic_83xx_pktln(sts_data[0]);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (!qlcnic_check_loopback_buff(skb->data, adapter->mac_addr))
+ adapter->ahw->diag_cnt++;
+ else
+ dump_skb(skb, adapter);
+
+ dev_kfree_skb_any(skb);
+}
+
+void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ u64 sts_data[2];
+ int ring, opcode;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data[0] = le64_to_cpu(desc->status_desc_data[0]);
+ sts_data[1] = le64_to_cpu(desc->status_desc_data[1]);
+ opcode = qlcnic_83xx_opcode(sts_data[1]);
+ if (!opcode)
+ return;
+
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
+ qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
new file mode 100644
index 000000000..44dac3c09
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -0,0 +1,4259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+#include <linux/pci.h>
+#include <net/vxlan.h>
+
+#include "qlcnic.h"
+#include "qlcnic_sriov.h"
+#include "qlcnic_hw.h"
+
+MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
+ "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
+
+static int qlcnic_mac_learn;
+module_param(qlcnic_mac_learn, int, 0444);
+MODULE_PARM_DESC(qlcnic_mac_learn,
+ "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
+
+int qlcnic_use_msi = 1;
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi, qlcnic_use_msi, int, 0444);
+
+int qlcnic_use_msi_x = 1;
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
+
+int qlcnic_auto_fw_reset = 1;
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
+
+int qlcnic_load_fw_file;
+MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file, 2=POST in fast mode, 3= POST in medium mode, 4=POST in slow mode)");
+module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
+
+static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+
+static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static int qlcnic_start_firmware(struct qlcnic_adapter *);
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
+static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
+static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
+static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *);
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
+static void qlcnic_82xx_io_resume(struct pci_dev *);
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X)
+ return ahw->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX;
+ else
+ return 1;
+}
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+static const struct pci_device_id qlcnic_pci_tbl[] = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE8830),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE8C30),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+inline void qlcnic_update_cmd_producer(struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+}
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const u32 qlcnic_reg_tbl[] = {
+ 0x1B20A8, /* PEG_HALT_STAT1 */
+ 0x1B20AC, /* PEG_HALT_STAT2 */
+ 0x1B20B0, /* FW_HEARTBEAT */
+ 0x1B2100, /* LOCK ID */
+ 0x1B2128, /* FW_CAPABILITIES */
+ 0x1B2138, /* drv active */
+ 0x1B2140, /* dev state */
+ 0x1B2144, /* drv state */
+ 0x1B2148, /* drv scratch */
+ 0x1B214C, /* dev partition info */
+ 0x1B2174, /* drv idc ver */
+ 0x1B2150, /* fw version major */
+ 0x1B2154, /* fw version minor */
+ 0x1B2158, /* fw version sub */
+ 0x1B219C, /* npar state */
+ 0x1B21FC, /* FW_IMG_VALID */
+ 0x1B2250, /* CMD_PEG_STATE */
+ 0x1B233C, /* RCV_PEG_STATE */
+ 0x1B23B4, /* ASIC TEMP */
+ 0x1B216C, /* FW api */
+ 0x1B2170, /* drv op mode */
+ 0x13C010, /* flash lock */
+ 0x13C014, /* flash unlock */
+};
+
+static const struct qlcnic_board_info qlcnic_boards[] = {
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+ "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x243,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x24a,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x246,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x252,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x26e,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x260,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x266,
+ "8300 Series Single Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x269,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x271,
+ "8300 Series Dual Port 10GbE Converged Network Adapter "
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE834X,
+ 0x0, 0x0, "8300 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE8830,
+ 0x0,
+ 0x0,
+ "8830 Series 1/10GbE Controller" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter"
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter"
+ "(TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3733,
+ "NC523SFP 10Gb 2-port Server Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x103c, 0x3346,
+ "CN1000Q Dual Port Converged Network Adapter" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ PCI_VENDOR_ID_QLOGIC,
+ 0x210,
+ "QME8242-k 10GbE Dual Port Mezzanine Card" },
+ { PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE824X,
+ 0x0, 0x0, "cLOM8214 1/10GbE Controller" },
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
+int qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return recv_ctx->sds_rings == NULL;
+}
+
+void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+ recv_ctx->sds_rings = NULL;
+}
+
+int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
+
+ eth_hw_addr_set(netdev, mac_addr);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->dev_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_vlan_list *cur;
+
+ list_for_each_entry(cur, &adapter->mac_list, list) {
+ if (ether_addr_equal_unaligned(adapter->mac_addr, cur->mac_addr)) {
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
+ 0, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ return;
+ }
+ }
+}
+
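+/* Change the interface MAC address: quiesce the device if it is up,
+ * drop the old address from the firmware filter list, program the new
+ * one and rebuild the filter list before resuming.
+ */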
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return -EINVAL;
+
+ if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
+ return -EOPNOTSUPP;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (ether_addr_equal_unaligned(adapter->mac_addr, addr->sa_data) &&
+ ether_addr_equal_unaligned(netdev->dev_addr, addr->sa_data))
+ return 0;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ qlcnic_delete_adapter_mac(adapter);
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
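+/* ndo_fdb_del: with FDB MAC learning enabled and an eSwitch or SR-IOV
+ * present, remove the address from the device list and the firmware
+ * filter; otherwise defer to the default FDB handling.
+ */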
+static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid,
+ struct netlink_ext_ack *extack)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = -EOPNOTSUPP;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_del(ndm, tb, netdev, addr, vid);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
+ if (is_unicast_ether_addr(addr)) {
+ err = dev_uc_del(netdev, addr);
+ if (!err)
+ err = qlcnic_nic_del_mac(adapter, addr);
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_del(netdev, addr);
+ } else {
+ err = -EINVAL;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_add(ndm, tb, netdev, addr, vid, flags);
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
+ pr_info("%s: FDB e-switch is not enabled\n", __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if (ether_addr_equal(addr, adapter->mac_addr))
+ return err;
+
+ if (is_unicast_ether_addr(addr)) {
+ if (netdev_uc_count(netdev) < adapter->ahw->max_uc_count)
+ err = dev_uc_add_excl(netdev, addr);
+ else
+ err = -ENOMEM;
+ } else if (is_multicast_ether_addr(addr)) {
+ err = dev_mc_add_excl(netdev, addr);
+ } else {
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
+ struct net_device *netdev,
+ struct net_device *filter_dev, int *idx)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err = 0;
+
+ if (!adapter->fdb_mac_learn)
+ return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
+ err = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
+
+ return err;
+}
+
+static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ usleep_range(10000, 11000);
+
+ if (!adapter->fw_work.work.func)
+ return;
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
+static int qlcnic_udp_tunnel_sync(struct net_device *dev, unsigned int table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct udp_tunnel_info ti;
+ int err;
+
+ udp_tunnel_nic_get_port(dev, table, 0, &ti);
+ if (ti.port) {
+ err = qlcnic_set_vxlan_port(adapter, ntohs(ti.port));
+ if (err)
+ return err;
+ }
+
+ return qlcnic_set_vxlan_parsing(adapter, ntohs(ti.port));
+}
+
+static const struct udp_tunnel_nic_info qlcnic_udp_tunnels = {
+ .sync_table = qlcnic_udp_tunnel_sync,
+ .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
+ .tables = {
+ { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
+
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ features = vlan_features_check(skb, features);
+ return vxlan_features_check(skb, features);
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_fix_features = qlcnic_fix_features,
+ .ndo_set_features = qlcnic_set_features,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+ .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
+ .ndo_fdb_add = qlcnic_fdb_add,
+ .ndo_fdb_del = qlcnic_fdb_del,
+ .ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
+ .ndo_features_check = qlcnic_features_check,
+#ifdef CONFIG_QLCNIC_SRIOV
+ .ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
+ .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_get_vf_config = qlcnic_sriov_get_vf_config,
+ .ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
+ .ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
+#endif
+};
+
+static const struct net_device_ops qlcnic_netdev_failed_ops = {
+ .ndo_open = qlcnic_open,
+};
+
+static struct qlcnic_nic_template qlcnic_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_82xx_config_led,
+ .start_firmware = qlcnic_82xx_start_firmware,
+ .request_reset = qlcnic_82xx_dev_request_reset,
+ .cancel_idc_work = qlcnic_82xx_cancel_idc_work,
+ .napi_add = qlcnic_82xx_napi_add,
+ .napi_del = qlcnic_82xx_napi_del,
+ .config_ipaddr = qlcnic_82xx_config_ipaddr,
+ .shutdown = qlcnic_82xx_shutdown,
+ .resume = qlcnic_82xx_resume,
+ .clear_legacy_intr = qlcnic_82xx_clear_legacy_intr,
+};
+
+struct qlcnic_nic_template qlcnic_vf_ops = {
+ .config_bridged_mode = qlcnicvf_config_bridged_mode,
+ .config_led = qlcnicvf_config_led,
+ .start_firmware = qlcnicvf_start_firmware
+};
+
+static struct qlcnic_hardware_ops qlcnic_hw_ops = {
+ .read_crb = qlcnic_82xx_read_crb,
+ .write_crb = qlcnic_82xx_write_crb,
+ .read_reg = qlcnic_82xx_hw_read_wx_2M,
+ .write_reg = qlcnic_82xx_hw_write_wx_2M,
+ .get_mac_address = qlcnic_82xx_get_mac_address,
+ .setup_intr = qlcnic_82xx_setup_intr,
+ .alloc_mbx_args = qlcnic_82xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_82xx_issue_cmd,
+ .get_func_no = qlcnic_82xx_get_func_no,
+ .api_lock = qlcnic_82xx_api_lock,
+ .api_unlock = qlcnic_82xx_api_unlock,
+ .add_sysfs = qlcnic_82xx_add_sysfs,
+ .remove_sysfs = qlcnic_82xx_remove_sysfs,
+ .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
+ .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
+ .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
+ .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
+ .setup_link_event = qlcnic_82xx_linkevent_request,
+ .get_nic_info = qlcnic_82xx_get_nic_info,
+ .get_pci_info = qlcnic_82xx_get_pci_info,
+ .set_nic_info = qlcnic_82xx_set_nic_info,
+ .change_macvlan = qlcnic_82xx_sre_macaddr_change,
+ .napi_enable = qlcnic_82xx_napi_enable,
+ .napi_disable = qlcnic_82xx_napi_disable,
+ .config_intr_coal = qlcnic_82xx_config_intr_coalesce,
+ .config_rss = qlcnic_82xx_config_rss,
+ .config_hw_lro = qlcnic_82xx_config_hw_lro,
+ .config_loopback = qlcnic_82xx_set_lb_mode,
+ .clear_loopback = qlcnic_82xx_clear_lb_mode,
+ .config_promisc_mode = qlcnic_82xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_82xx_change_filter,
+ .get_board_info = qlcnic_82xx_get_board_info,
+ .set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
+ .free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
+ .io_error_detected = qlcnic_82xx_io_error_detected,
+ .io_slot_reset = qlcnic_82xx_io_slot_reset,
+ .io_resume = qlcnic_82xx_io_resume,
+ .get_beacon_state = qlcnic_82xx_get_beacon_state,
+ .enable_sds_intr = qlcnic_82xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_82xx_disable_sds_intr,
+ .enable_tx_intr = qlcnic_82xx_enable_tx_intr,
+ .disable_tx_intr = qlcnic_82xx_disable_tx_intr,
+ .get_saved_state = qlcnic_82xx_get_saved_state,
+ .set_saved_state = qlcnic_82xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_82xx_get_cap_size,
+ .set_sys_info = qlcnic_82xx_set_sys_info,
+ .store_cap_mask = qlcnic_82xx_store_cap_mask,
+ .encap_rx_offload = qlcnic_82xx_encap_rx_offload,
+ .encap_tx_offload = qlcnic_82xx_encap_tx_offload,
+};
+
+static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (qlcnic_82xx_check(adapter) &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+static int qlcnic_max_rings(struct qlcnic_adapter *adapter, u8 ring_cnt,
+ int queue_type)
+{
+ int num_rings, max_rings = QLCNIC_MAX_SDS_RINGS;
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ max_rings = adapter->max_sds_rings;
+ else if (queue_type == QLCNIC_TX_QUEUE)
+ max_rings = adapter->max_tx_rings;
+
+ num_rings = rounddown_pow_of_two(min_t(int, num_online_cpus(),
+ max_rings));
+
+ if (ring_cnt > num_rings)
+ return num_rings;
+ else
+ return ring_cnt;
+}
+
+void qlcnic_set_tx_ring_count(struct qlcnic_adapter *adapter, u8 tx_cnt)
+{
+ /* 83xx adapter does not have max_tx_rings initialized in probe */
+ if (adapter->max_tx_rings)
+ adapter->drv_tx_rings = qlcnic_max_rings(adapter, tx_cnt,
+ QLCNIC_TX_QUEUE);
+ else
+ adapter->drv_tx_rings = tx_cnt;
+}
+
+void qlcnic_set_sds_ring_count(struct qlcnic_adapter *adapter, u8 rx_cnt)
+{
+ /* 83xx adapter does not have max_sds_rings initialized in probe */
+ if (adapter->max_sds_rings)
+ adapter->drv_sds_rings = qlcnic_max_rings(adapter, rx_cnt,
+ QLCNIC_RX_QUEUE);
+ else
+ adapter->drv_sds_rings = rx_cnt;
+}
+
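+/* Request MSI-X vectors for the user-configured TSS/RSS ring counts.
+ * If the exact request cannot be satisfied, fall back to the default
+ * Tx/SDS ring counts and retry once.
+ */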
+int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int num_msix = 0, err = 0, vector;
+
+ adapter->flags &= ~QLCNIC_TSS_RSS;
+
+ if (adapter->drv_tss_rings > 0)
+ num_msix += adapter->drv_tss_rings;
+ else
+ num_msix += adapter->drv_tx_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ num_msix += adapter->drv_rss_rings;
+ else
+ num_msix += adapter->drv_sds_rings;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+restore:
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
+ if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
+ return err;
+
+ netdev_info(adapter->netdev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ num_msix = adapter->drv_tx_rings + adapter->drv_sds_rings;
+
+ /* Set rings to 0 so we can restore original TSS/RSS count */
+ adapter->drv_tss_rings = 0;
+ adapter->drv_rss_rings = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ num_msix += 1;
+
+ netdev_info(adapter->netdev,
+ "Restoring %d Tx, %d SDS rings for total %d vectors.\n",
+ adapter->drv_tx_rings, adapter->drv_sds_rings,
+ num_msix);
+
+ goto restore;
+ } else if (err < 0) {
+ return err;
+ }
+
+ adapter->ahw->num_msix = num_msix;
+ if (adapter->drv_tss_rings > 0)
+ adapter->drv_tx_rings = adapter->drv_tss_rings;
+
+ if (adapter->drv_rss_rings > 0)
+ adapter->drv_sds_rings = adapter->drv_rss_rings;
+
+ return 0;
+}
+
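+/* Enable MSI-X with the requested vector count. On a partial grant the
+ * request is shrunk based on the number of vectors actually available,
+ * the driver ring counts are adjusted, and the request is retried.
+ */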
+int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int err, vector;
+
+ if (!adapter->msix_entries) {
+ adapter->msix_entries = kcalloc(num_msix,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+ }
+
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+ if (adapter->ahw->msix_supported) {
+enable_msix:
+ for (vector = 0; vector < num_msix; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+
+ if (err == num_msix) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ adapter->ahw->num_msix = num_msix;
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return 0;
+ } else if (err > 0) {
+ pci_disable_msix(pdev);
+
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X vectors, Available vectors %d\n",
+ num_msix, err);
+
+ if (qlcnic_82xx_check(adapter)) {
+ num_msix = rounddown_pow_of_two(err);
+ if (err < QLCNIC_82XX_MINIMUM_VECTOR)
+ return -ENOSPC;
+ } else {
+ num_msix = rounddown_pow_of_two(err - 1);
+ num_msix += 1;
+ if (err < QLCNIC_83XX_MINIMUM_VECTOR)
+ return -ENOSPC;
+ }
+
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ adapter->drv_sds_rings = num_msix;
+ adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
+ } else {
+ /* Distribute vectors equally */
+ adapter->drv_tx_rings = num_msix / 2;
+ adapter->drv_sds_rings = adapter->drv_tx_rings;
+ }
+
+ if (num_msix) {
+ dev_info(&pdev->dev,
+ "Trying to allocate %d MSI-X interrupt vectors\n",
+ num_msix);
+ goto enable_msix;
+ }
+ } else {
+ dev_info(&pdev->dev,
+ "Unable to allocate %d MSI-X vectors, err=%d\n",
+ num_msix, err);
+ return err;
+ }
+ }
+
+ return -EIO;
+}
+
+static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ num_msix = adapter->drv_sds_rings;
+
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += adapter->drv_tx_rings;
+ else
+ num_msix += QLCNIC_SINGLE_RING;
+
+ return num_msix;
+}
+
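+/* Try MSI when enabled by the module parameter; fall back to legacy
+ * INTx, using the per-function status/mask registers, only when both
+ * MSI and MSI-X are disabled.
+ */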
+static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ u32 offset, mask_reg;
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ offset = msi_tgt_status[adapter->ahw->pci_func];
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter->ahw,
+ offset);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return err;
+ }
+
+ if (qlcnic_use_msi || qlcnic_use_msi_x)
+ return -EOPNOTSUPP;
+
+ legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
+ adapter->ahw->int_vec_bit = legacy_intrp->int_vec_bit;
+ offset = legacy_intrp->tgt_status_reg;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(ahw, offset);
+ mask_reg = legacy_intrp->tgt_mask_reg;
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(ahw, mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(ahw, ISR_INT_VECTOR);
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(ahw, ISR_INT_STATE_REG);
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return err;
+}
+
+static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
+{
+ int num_msix, err = 0;
+
+ if (adapter->flags & QLCNIC_TSS_RSS) {
+ err = qlcnic_setup_tss_rss_intr(adapter);
+ if (err < 0)
+ return err;
+ num_msix = adapter->ahw->num_msix;
+ } else {
+ num_msix = qlcnic_82xx_calculate_msix_vector(adapter);
+
+ err = qlcnic_enable_msix(adapter, num_msix);
+ if (err == -ENOMEM)
+ return err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl =
+ vzalloc(array_size(sizeof(struct qlcnic_intrpt_config),
+ ahw->num_msix));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure Interrupt for %d vector\n",
+ ahw->num_msix);
+ return err;
+ }
+
+ return 0;
+}
+
+void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ if (adapter->ahw->intr_tbl) {
+ vfree(adapter->ahw->intr_tbl);
+ adapter->ahw->intr_tbl = NULL;
+ }
+}
+
+static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
+{
+ if (ahw->pci_base0 != NULL)
+ iounmap(ahw->pci_base0);
+}
+
+static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_pci_info *pci_info;
+ int ret;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ switch (ahw->port_type) {
+ case QLCNIC_GBE:
+ ahw->total_nic_func = QLCNIC_NIU_MAX_GBE_PORTS;
+ break;
+ case QLCNIC_XGBE:
+ ahw->total_nic_func = QLCNIC_NIU_MAX_XG_PORTS;
+ break;
+ }
+ return 0;
+ }
+
+ if (ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ kfree(pci_info);
+ return ret;
+}
+
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+ bool ret = false;
+
+ if (qlcnic_84xx_check(adapter)) {
+ ret = true;
+ } else if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
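+/* Build the per-function NPAR table and eSwitch state from the
+ * firmware PCI info, skipping inactive and non-NIC functions.
+ */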
+int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_pci_info *pci_info;
+ int i, id = 0, ret = 0, j = 0;
+ u16 act_pci_func;
+ u8 pfn;
+
+ pci_info = kcalloc(ahw->max_vnic_func, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret)
+ goto err_pci_info;
+
+ act_pci_func = ahw->total_nic_func;
+
+ adapter->npars = kcalloc(act_pci_func,
+ sizeof(struct qlcnic_npar_info),
+ GFP_KERNEL);
+ if (!adapter->npars) {
+ ret = -ENOMEM;
+ goto err_pci_info;
+ }
+
+ adapter->eswitch = kcalloc(QLCNIC_NIU_MAX_XG_PORTS,
+ sizeof(struct qlcnic_eswitch),
+ GFP_KERNEL);
+ if (!adapter->eswitch) {
+ ret = -ENOMEM;
+ goto err_npars;
+ }
+
+ for (i = 0; i < ahw->max_vnic_func; i++) {
+ pfn = pci_info[i].id;
+
+ if (pfn >= ahw->max_vnic_func) {
+ ret = -EINVAL;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
+ goto err_eswitch;
+ }
+
+ if (!pci_info[i].active ||
+ (pci_info[i].type != QLCNIC_TYPE_NIC))
+ continue;
+
+ if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+ if (!qlcnic_83xx_set_port_eswitch_status(adapter, pfn,
+ &id))
+ adapter->npars[j].eswitch_status = true;
+ else
+ continue;
+ } else {
+ adapter->npars[j].eswitch_status = true;
+ }
+
+ adapter->npars[j].pci_func = pfn;
+ adapter->npars[j].active = (u8)pci_info[i].active;
+ adapter->npars[j].type = (u8)pci_info[i].type;
+ adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
+ adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
+ adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
+ memcpy(&adapter->npars[j].mac, &pci_info[i].mac, ETH_ALEN);
+ j++;
+ }
+
+ /* Update eSwitch status for adapters without per port eSwitch
+ * configuration capability
+ */
+ if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ }
+
+ kfree(pci_info);
+ return 0;
+
+err_eswitch:
+ kfree(adapter->eswitch);
+ adapter->eswitch = NULL;
+err_npars:
+ kfree(adapter->npars);
+ adapter->npars = NULL;
+err_pci_info:
+ kfree(pci_info);
+
+ return ret;
+}
+
+static int
+qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
+{
+ u8 id;
+ int ret;
+ u32 data = QLCNIC_MGMT_FUNC;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ret = qlcnic_api_lock(adapter);
+ if (ret)
+ goto err_lock;
+
+ id = ahw->pci_func;
+ data = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+ data = (data & ~QLC_DEV_SET_DRV(0xf, id)) |
+ QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC, id);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_DRV_OP_MODE, data);
+ qlcnic_api_unlock(adapter);
+err_lock:
+ return ret;
+}
+
+static void qlcnic_check_vf(struct qlcnic_adapter *adapter,
+ const struct pci_device_id *ent)
+{
+ u32 op_mode, priv_level;
+
+ /* Determine FW API version */
+ adapter->ahw->fw_hal_version = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_FW_API);
+
+ /* Find PCI function number */
+ qlcnic_get_func_no(adapter);
+
+ /* Determine function privilege level */
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+ if (priv_level == QLCNIC_NON_PRIV_FUNC) {
+ adapter->ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_vf_ops;
+ } else {
+ adapter->nic_ops = &qlcnic_ops;
+ }
+}
+
+#define QLCNIC_82XX_BAR0_LENGTH 0x00200000UL
+#define QLCNIC_83XX_BAR0_LENGTH 0x4000
+static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
+{
+ switch (dev_id) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
+ *bar = QLCNIC_82XX_BAR0_LENGTH;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
+ *bar = QLCNIC_83XX_BAR0_LENGTH;
+ break;
+ default:
+ *bar = 0;
+ }
+}
+
+static int qlcnic_setup_pci_map(struct pci_dev *pdev,
+ struct qlcnic_hardware_context *ahw)
+{
+ u32 offset;
+ void __iomem *mem_ptr0 = NULL;
+ unsigned long mem_len, pci_len0 = 0, bar0_len;
+
+ /* remap phys address */
+ mem_len = pci_resource_len(pdev, 0);
+
+ qlcnic_get_bar_length(pdev->device, &bar0_len);
+ if (mem_len >= bar0_len) {
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
+
+ ahw->pci_base0 = mem_ptr0;
+ ahw->pci_len0 = pci_len0;
+ offset = QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(ahw->pci_func));
+ qlcnic_get_ioaddr(ahw, offset);
+
+ return 0;
+}
+
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+ int index)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned short subsystem_vendor;
+ bool ret = true;
+
+ subsystem_vendor = pdev->subsystem_vendor;
+
+ if (pdev->device == PCI_DEVICE_ID_QLOGIC_QLE824X ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) {
+ if (qlcnic_boards[index].sub_vendor == subsystem_vendor &&
+ qlcnic_boards[index].sub_device == pdev->subsystem_device)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
+static void qlcnic_get_board_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_validate_subsystem_id(adapter, i)) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
+ else
+ sprintf(name, "%pM: %s" , adapter->mac_addr,
+ qlcnic_boards[i].short_name);
+}
+
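+/* Read the running firmware version and board configuration, re-fetch
+ * the minidump template when it is missing or the firmware version has
+ * increased, and size the receive descriptor rings based on port type
+ * and eSwitch mode.
+ */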
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ int err;
+ u32 fw_major, fw_minor, fw_build, prev_fw_version;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+
+ prev_fw_version = adapter->fw_version;
+
+ fw_major = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLC_SHARED_REG_RD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ return;
+ }
+ if (ahw->op_mode != QLCNIC_NON_PRIV_FUNC) {
+ if (fw_dump->tmpl_hdr == NULL ||
+ adapter->fw_version > prev_fw_version) {
+ vfree(fw_dump->tmpl_hdr);
+ if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
+ dev_info(&pdev->dev,
+ "Supports FW dump capability\n");
+ }
+ }
+
+ dev_info(&pdev->dev, "Driver v%s, firmware v%d.%d.%d\n",
+ QLCNIC_LINUX_VERSIONID, fw_major, fw_minor, fw_build);
+
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
+ } else {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ }
+
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+
+ } else if (adapter->ahw->port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->ahw->msix_supported = !!qlcnic_use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+static int
+qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
+ if (err)
+ return err;
+
+ adapter->ahw->physical_port = (u8)nic_info.phys_port;
+ adapter->ahw->switch_mode = nic_info.switch_mode;
+ adapter->ahw->max_tx_ques = nic_info.max_tx_ques;
+ adapter->ahw->max_rx_ques = nic_info.max_rx_ques;
+ adapter->ahw->capabilities = nic_info.capabilities;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
+ u32 temp;
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+ if (err == -EIO)
+ return err;
+ adapter->ahw->extra_capability[0] = temp;
+ } else {
+ adapter->ahw->extra_capability[0] = 0;
+ }
+
+ adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
+ adapter->ahw->max_mtu = nic_info.max_mtu;
+
+ if (adapter->ahw->capabilities & BIT_6) {
+ adapter->flags |= QLCNIC_ESWITCH_ENABLED;
+ adapter->ahw->nic_mode = QLCNIC_VNIC_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_VNIC_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_VNIC_SDS_RINGS;
+
+ dev_info(&adapter->pdev->dev, "vNIC mode enabled.\n");
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
+ }
+
+ return err;
+}
+
+void qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ if (esw_cfg->discard_tagged)
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ else
+ adapter->flags |= QLCNIC_TAGGING_ENABLED;
+
+ if (esw_cfg->vlan_id) {
+ adapter->rx_pvid = esw_cfg->vlan_id;
+ adapter->tx_pvid = esw_cfg->vlan_id;
+ } else {
+ adapter->rx_pvid = 0;
+ adapter->tx_pvid = 0;
+ }
+}
+
+static int
+qlcnic_vlan_rx_add(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 1);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot add VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
+
+ set_bit(vid, adapter->vlans);
+ return 0;
+}
+
+static int
+qlcnic_vlan_rx_del(struct net_device *netdev, __be16 proto, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_guest_vlan(adapter, vid, 0);
+ if (err) {
+ netdev_err(netdev,
+ "Cannot delete VLAN filter for VLAN id %d, err=%d",
+ vid, err);
+ return err;
+ }
+ }
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
+ clear_bit(vid, adapter->vlans);
+ return 0;
+}
+
+void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
+ QLCNIC_PROMISC_DISABLED);
+
+ if (esw_cfg->mac_anti_spoof)
+ adapter->flags |= QLCNIC_MACSPOOF;
+
+ if (!esw_cfg->mac_override)
+ adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
+
+ if (!esw_cfg->promisc_mode)
+ adapter->flags |= QLCNIC_PROMISC_DISABLED;
+}
+
+int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return 0;
+
+ esw_cfg.pci_func = adapter->ahw->pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
+ return -EIO;
+ qlcnic_set_vlan_config(adapter, &esw_cfg);
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
+ qlcnic_set_netdev_features(adapter, &esw_cfg);
+
+ return 0;
+}
+
+void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (qlcnic_83xx_check(adapter))
+ return;
+
+ adapter->offload_flags = esw_cfg->offload_flags;
+ adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
+ netdev_update_features(netdev);
+ adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
+}
+
+static int
+qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
+{
+ u32 op_mode, priv_level;
+ int err = 0;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
+ return 0;
+
+ op_mode = QLC_SHARED_REG_RD32(adapter, QLCNIC_DRV_OP_MODE);
+
+ if (op_mode == QLC_DEV_DRV_DEFAULT)
+ priv_level = QLCNIC_MGMT_FUNC;
+ else
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
+
+ if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if (priv_level == QLCNIC_MGMT_FUNC) {
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ err = qlcnic_init_pci_info(adapter);
+ if (err)
+ return err;
+ /* Set privilege level for other functions */
+ qlcnic_set_function_modes(adapter);
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Management function\n",
+ adapter->ahw->fw_hal_version);
+ } else if (priv_level == QLCNIC_PRIV_FUNC) {
+ adapter->ahw->op_mode = QLCNIC_PRIV_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d, Privileged function\n",
+ adapter->ahw->fw_hal_version);
+ }
+ } else {
+ adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
+ }
+
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+
+ return err;
+}
+
+int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ struct qlcnic_npar_info *npar;
+ u8 i;
+
+ if (adapter->need_fw_reset)
+ return 0;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
+ esw_cfg.pci_func = adapter->npars[i].pci_func;
+ esw_cfg.mac_override = BIT_0;
+ esw_cfg.promisc_mode = BIT_0;
+ if (qlcnic_82xx_check(adapter)) {
+ esw_cfg.offload_flags = BIT_0;
+ if (QLCNIC_IS_TSO_CAPABLE(adapter))
+ esw_cfg.offload_flags |= (BIT_1 | BIT_2);
+ }
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+ npar = &adapter->npars[i];
+ npar->pvid = esw_cfg.vlan_id;
+ npar->mac_override = esw_cfg.mac_override;
+ npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
+ npar->discard_tagged = esw_cfg.discard_tagged;
+ npar->promisc_mode = esw_cfg.promisc_mode;
+ npar->offload_flags = esw_cfg.offload_flags;
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_info *npar, int pci_func)
+{
+ struct qlcnic_esw_func_cfg esw_cfg;
+ esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
+ esw_cfg.pci_func = pci_func;
+ esw_cfg.vlan_id = npar->pvid;
+ esw_cfg.mac_override = npar->mac_override;
+ esw_cfg.discard_tagged = npar->discard_tagged;
+ esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
+ esw_cfg.offload_flags = npar->offload_flags;
+ esw_cfg.promisc_mode = npar->promisc_mode;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ esw_cfg.op_mode = QLCNIC_ADD_VLAN;
+ if (qlcnic_config_switch_port(adapter, &esw_cfg))
+ return -EIO;
+
+ return 0;
+}
+
+int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
+{
+ int i, err;
+ struct qlcnic_npar_info *npar;
+ struct qlcnic_info nic_info;
+ u8 pci_func;
+
+ if (qlcnic_82xx_check(adapter))
+ if (!adapter->need_fw_reset)
+ return 0;
+
+ /* Set the NPAR config data after FW reset */
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ npar = &adapter->npars[i];
+ pci_func = npar->pci_func;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (err)
+ return err;
+ nic_info.min_tx_bw = npar->min_bw;
+ nic_info.max_tx_bw = npar->max_bw;
+ err = qlcnic_set_nic_info(adapter, &nic_info);
+ if (err)
+ return err;
+
+ if (npar->enable_pm) {
+ err = qlcnic_config_port_mirroring(adapter,
+ npar->dest_npar, 1,
+ pci_func);
+ if (err)
+ return err;
+ }
+ err = qlcnic_reset_eswitch_config(adapter, npar, pci_func);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
+{
+ u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
+ u32 npar_state;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ return 0;
+
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
+ while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
+ msleep(1000);
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
+ }
+ if (!npar_opt_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for NPAR state to operational timeout\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int
+qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return 0;
+
+ err = qlcnic_set_default_offload_settings(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_reset_npar_config(adapter);
+ if (err)
+ return err;
+
+ qlcnic_dev_set_npar_ready(adapter);
+
+ return err;
+}
+
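+/* 82xx firmware bring-up: load the firmware image from flash or from a
+ * file when a (re)load is required, verify that the firmware is up,
+ * then mark the device ready and initialize eSwitch and NPAR state.
+ */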
+static int qlcnic_82xx_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err < 0)
+ return err;
+ else if (!err)
+ goto check_fw_status;
+
+ if (qlcnic_load_fw_file)
+ qlcnic_request_firmware(adapter);
+ else {
+ err = qlcnic_check_flash_fw_ver(adapter);
+ if (err)
+ goto err_out;
+
+ adapter->ahw->fw_type = QLCNIC_FLASH_ROMIMAGE;
+ }
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err == 0)
+ goto check_fw_status;
+
+ err = qlcnic_pinit_from_rom(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
+
+check_fw_status:
+ err = qlcnic_check_fw_status(adapter);
+ if (err)
+ goto err_out;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+ qlcnic_idc_debug_info(adapter, 1);
+ err = qlcnic_check_eswitch_mode(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failed for eswitch\n");
+ goto err_out;
+ }
+ err = qlcnic_set_mgmt_operations(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_check_options(adapter);
+ adapter->need_fw_reset = 0;
+
+ qlcnic_release_firmware(adapter);
+ return 0;
+
+err_out:
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
+ dev_err(&adapter->pdev->dev, "Device state set to failed\n");
+
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
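+/* Register an interrupt handler for each SDS ring and, when Tx
+ * interrupts are not shared, for each Tx ring. The handler and IRQ
+ * flags depend on the interrupt mode and on any active diagnostic test.
+ */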
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int err, ring, num_sds_rings;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_tmp_intr;
+ else
+ handler = qlcnic_83xx_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ if (qlcnic_82xx_check(adapter))
+ handler = qlcnic_intr;
+ else
+ handler = qlcnic_83xx_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ num_sds_rings = adapter->drv_sds_rings;
+ for (ring = 0; ring < num_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
+ (ring == (num_sds_rings - 1))) {
+ if (!(adapter->flags &
+ QLCNIC_MSIX_ENABLED))
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "qlcnic");
+ else
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "%s-tx-0-rx-%d",
+ netdev->name, ring);
+ } else {
+ snprintf(sds_ring->name,
+ sizeof(sds_ring->name),
+ "%s-rx-%d",
+ netdev->name, ring);
+ }
+ err = request_irq(sds_ring->irq, handler, flags,
+ sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+ }
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
+ handler = qlcnic_msix_tx_intr;
+ for (ring = 0; ring < adapter->drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ snprintf(tx_ring->name, sizeof(tx_ring->name),
+ "%s-tx-%d", netdev->name, ring);
+ err = request_irq(tx_ring->irq, handler, flags,
+ tx_ring->name, tx_ring);
+ if (err)
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+
+ if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
+ if (qlcnic_82xx_check(adapter) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+ }
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
+ for (ring = 0; ring < adapter->drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring->irq)
+ free_irq(tx_ring->irq, tx_ring);
+ }
+ }
+ }
+}
+
+static void qlcnic_get_lro_mss_capability(struct qlcnic_adapter *adapter)
+{
+ u32 capab = 0;
+
+ if (qlcnic_82xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_LRO_MAX_TCP_SEG)
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ } else {
+ capab = adapter->ahw->capabilities;
+ if (QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(capab))
+ adapter->flags |= QLCNIC_FW_LRO_MSS_CAP;
+ }
+}
+
+static int qlcnic_config_def_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ /* Initialize interrupt coalesce parameters */
+ ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_83xx_set_rx_tx_intr_coal(adapter);
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+
+ err = qlcnic_82xx_set_rx_coalesce(adapter);
+ }
+
+ return err;
+}
+
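+/* Bring the data path up: create the firmware context, post receive
+ * buffers, program multicast, MTU, RSS and coalescing settings, then
+ * enable NAPI and start the Tx queues.
+ */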
+int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return 0;
+
+ if (qlcnic_set_eswitch_port_config(adapter))
+ return -EIO;
+
+ qlcnic_get_lro_mss_capability(adapter);
+
+ if (qlcnic_fw_create_ctx(adapter))
+ return -EIO;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw->linkup = 0;
+
+ if (adapter->drv_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_def_intr_coalesce(adapter);
+
+ if (netdev->features & NETIF_F_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ adapter->ahw->reset_context = 0;
+ netif_tx_start_all_queues(netdev);
+ return 0;
+}
+
+int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ netif_carrier_off(netdev);
+ adapter->ahw->linkup = 0;
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ if (adapter->fhash.fnum)
+ qlcnic_delete_lb_filters(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_fw_destroy_ctx(adapter);
+ adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
+
+ qlcnic_reset_rx_buffers_list(adapter);
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
+}
+
+/* Used during suspend and firmware recovery */
+
+void qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+}
+
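+/* Allocate NAPI, software and hardware resources and request
+ * interrupts. On failure each step is unwound in reverse order.
+ */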
+int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ goto err_out_napi_del;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_hw;
+ }
+
+ qlcnic_create_sysfs_entries(adapter);
+
+ if (qlcnic_encap_rx_offload(adapter))
+ udp_tunnel_nic_reset_ntf(netdev);
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_hw:
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+err_out_napi_del:
+ qlcnic_napi_del(adapter);
+ return err;
+}
+
+void qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int drv_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int drv_tx_rings = adapter->drv_tx_rings;
+ int ring;
+
+ clear_bit(__QLCNIC_DEV_UP, &adapter->state);
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_disable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ qlcnic_fw_destroy_ctx(adapter);
+
+ qlcnic_detach(adapter);
+
+ adapter->ahw->diag_test = 0;
+ adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
+
+ if (qlcnic_attach(adapter))
+ goto out;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+out:
+ netif_device_attach(netdev);
+}
+
+static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
+
+ adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
+ GFP_KERNEL);
+ if (!adapter->recv_ctx) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
+ /* clear stats */
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+err_out:
+ return err;
+}
+
+static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
+ kfree(adapter->recv_ctx);
+ adapter->recv_ctx = NULL;
+
+ if (fw_dump->tmpl_hdr) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ }
+
+ if (fw_dump->dma_buffer) {
+ dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+ fw_dump->dma_buffer, fw_dump->phys_addr);
+ fw_dump->dma_buffer = NULL;
+ }
+
+ kfree(adapter->ahw->reset.buff);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
+}
+
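+/*
+ * Tear down the regular context and re-attach the adapter with a single
+ * SDS ring for the requested diagnostic (interrupt or loopback) test.
+ */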
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
+ adapter->ahw->diag_test = test;
+ adapter->ahw->linkup = 0;
+
+ ret = qlcnic_attach(adapter);
+ if (ret) {
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ ret = qlcnic_fw_create_ctx(adapter);
+ if (ret) {
+ qlcnic_detach(adapter);
+ netif_device_attach(netdev);
+ return ret;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring, ring);
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ }
+ }
+
+ if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
+ adapter->ahw->loopback_state = 0;
+ qlcnic_linkevent_request(adapter, 1);
+ }
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+
+ return 0;
+}
+
+/* Reset context in hardware only */
+static int
+qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ netdev_info(adapter->netdev, "%s: soft reset complete\n", __func__);
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err) {
+ __qlcnic_up(adapter, netdev);
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ }
+
+ netif_device_attach(netdev);
+ }
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u16 act_pci_fn = ahw->total_nic_func;
+ u16 count;
+
+ ahw->max_mc_count = QLCNIC_MAX_MC_COUNT;
+ if (act_pci_fn <= 2)
+ count = (QLCNIC_MAX_UC_COUNT - QLCNIC_MAX_MC_COUNT) /
+ act_pci_fn;
+ else
+ count = (QLCNIC_LB_MAX_FILTERS - QLCNIC_MAX_MC_COUNT) /
+ act_pci_fn;
+ ahw->max_uc_count = count;
+}
+
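+/*
+ * Propagate the driver's Tx/Rx ring counts to the networking core; a
+ * count of zero leaves the corresponding queue setting untouched.
+ */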
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ u8 tx_queues, u8 rx_queues)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (tx_queues) {
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev, "failed to set %d Tx queues\n",
+ tx_queues);
+ return err;
+ }
+ }
+
+ if (rx_queues) {
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev, "failed to set %d Rx queues\n",
+ rx_queues);
+ }
+
+ return err;
+}
+
+int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->rx_csum = 1;
+ adapter->ahw->mc_enabled = 0;
+ qlcnic_set_mac_filter_count(adapter);
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = QLCNIC_WATCHDOG_TIMEOUTVALUE * HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA);
+
+ if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
+ netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ if (qlcnic_vlan_tx_check(adapter))
+ netdev->features |= (NETIF_F_HW_VLAN_CTAG_TX);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->features |= NETIF_F_LRO;
+
+ if (qlcnic_encap_tx_offload(adapter)) {
+ netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
+
+ /* encapsulation Tx offload supported by Adapter */
+ netdev->hw_enc_features = NETIF_F_IP_CSUM |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+ }
+
+ if (qlcnic_encap_rx_offload(adapter)) {
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
+
+ netdev->udp_tunnel_nic_info = &qlcnic_udp_tunnels;
+ }
+
+ netdev->hw_features = netdev->features;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ /* MTU range: 68 - 9600 */
+ netdev->min_mtu = P3P_MIN_MTU;
+ netdev->max_mtu = P3P_MAX_MTU;
+
+ err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ return err;
+
+ qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+void qlcnic_free_tx_rings(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (tx_ring) {
+ vfree(tx_ring->cmd_buf_arr);
+ tx_ring->cmd_buf_arr = NULL;
+ }
+ }
+ kfree(adapter->tx_ring);
+}
+
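+/*
+ * Allocate the Tx ring array and per-ring command buffers, and record
+ * the per-ring MSI-X vectors when multiple Tx queues are supported.
+ */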
+int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int ring, vector, index;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+
+ tx_ring = kcalloc(adapter->drv_tx_rings,
+ sizeof(struct qlcnic_host_tx_ring), GFP_KERNEL);
+ if (tx_ring == NULL)
+ return -ENOMEM;
+
+ adapter->tx_ring = tx_ring;
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, ring);
+ cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ qlcnic_free_tx_rings(adapter);
+ return -ENOMEM;
+ }
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
+ }
+
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ tx_ring->adapter = adapter;
+ if (adapter->flags & QLCNIC_MSIX_ENABLED) {
+ index = adapter->drv_sds_rings + ring;
+ vector = adapter->msix_entries[index].vector;
+ tx_ring->irq = vector;
+ }
+ }
+ }
+
+ return 0;
+}
+
+void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 fw_cmd = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ fw_cmd = QLCNIC_CMD_82XX_SET_DRV_VER;
+ else if (qlcnic_83xx_check(adapter))
+ fw_cmd = QLCNIC_CMD_83XX_SET_DRV_VER;
+
+ if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_SET_DRV_VER)
+ qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
+}
+
+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+ qlcnic_api_lock(adapter);
+ qlcnic_api_unlock(adapter);
+}
+
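+/*
+ * PCI probe entry point: map the device, select the 82xx/83xx/VF
+ * hardware ops, start the firmware and register the net device. On
+ * unrecoverable firmware errors the driver registers a minimal netdev
+ * in maintenance mode instead.
+ */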
+static int
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ struct qlcnic_hardware_context *ahw;
+ char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+ pci_enable_pcie_error_reporting(pdev);
+
+ ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
+ if (!ahw) {
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ switch (ent->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE824X:
+ ahw->hw_ops = &qlcnic_hw_ops;
+ ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE8830:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ qlcnic_83xx_register_map(ahw);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE8C30:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
+ qlcnic_sriov_vf_register_map(ahw);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_out_free_hw_res;
+ }
+
+ err = qlcnic_setup_pci_map(pdev, ahw);
+ if (err)
+ goto err_out_free_hw_res;
+
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_out_iounmap;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw = ahw;
+
+ adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
+ if (adapter->qlcnic_wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to create workqueue\n");
+ goto err_out_free_netdev;
+ }
+
+ err = qlcnic_alloc_adapter_resources(adapter);
+ if (err)
+ goto err_out_free_wq;
+
+ adapter->dev_rst_time = jiffies;
+ ahw->revision_id = pdev->revision;
+ ahw->max_vnic_func = qlcnic_get_vnic_func_count(adapter);
+ if (qlcnic_mac_learn == FDB_MAC_LEARN)
+ adapter->fdb_mac_learn = true;
+ else if (qlcnic_mac_learn == DRV_MAC_LEARN)
+ adapter->drv_mac_learn = true;
+
+ rwlock_init(&adapter->ahw->crb_lock);
+ mutex_init(&adapter->ahw->mem_lock);
+
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ qlcnic_register_dcb(adapter);
+
+ if (qlcnic_82xx_check(adapter)) {
+ qlcnic_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ qlcnic_reset_api_lock(adapter);
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"
+ "\t\tIf reboot doesn't help, try flashing the card\n");
+ goto err_out_maintenance_mode;
+ }
+
+ /* compute and set default and max tx/sds rings */
+ if (adapter->ahw->msix_supported) {
+ if (qlcnic_check_multi_tx_capability(adapter) == 1)
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_SINGLE_RING);
+ else
+ qlcnic_set_tx_ring_count(adapter,
+ QLCNIC_DEF_TX_RINGS);
+ qlcnic_set_sds_ring_count(adapter,
+ QLCNIC_DEF_SDS_RINGS);
+ } else {
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+ }
+
+ err = qlcnic_setup_idc_param(adapter);
+ if (err)
+ goto err_out_free_hw;
+
+ adapter->flags |= QLCNIC_NEED_FLR;
+
+ } else if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_check_vf(adapter, ent);
+ adapter->portnum = adapter->ahw->pci_func;
+ err = qlcnic_83xx_init(adapter);
+ if (err) {
+ switch (err) {
+ case -ENOTRECOVERABLE:
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+ dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
+ goto err_out_free_hw;
+ case -ENOMEM:
+ dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
+ goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
+ default:
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
+ goto err_out_maintenance_mode;
+ }
+ }
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+ } else {
+ dev_err(&pdev->dev,
+ "%s: failed. Please Reboot\n", __func__);
+ err = -ENODEV;
+ goto err_out_free_hw;
+ }
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ qlcnic_read_phys_port_id(adapter);
+
+ if (adapter->portnum == 0) {
+ qlcnic_get_board_name(adapter, board_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ board_name, adapter->ahw->revision_id);
+ }
+
+ if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
+ !!qlcnic_use_msi)
+ dev_warn(&pdev->dev,
+ "Device does not support MSI interrupts\n");
+
+ if (qlcnic_82xx_check(adapter)) {
+ err = qlcnic_dcb_enable(adapter->dcb);
+ if (err) {
+ qlcnic_dcb_free(adapter->dcb);
+ dev_err(&pdev->dev, "Failed to enable DCB\n");
+ goto err_out_free_hw;
+ }
+
+ qlcnic_dcb_get_info(adapter->dcb);
+ err = qlcnic_setup_intr(adapter);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+ }
+
+ err = qlcnic_get_act_pci_func(adapter);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ pci_set_drvdata(pdev, adapter);
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+
+ switch (adapter->ahw->port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ if (adapter->drv_mac_learn)
+ qlcnic_alloc_lb_filters_mem(adapter);
+
+ qlcnic_add_sysfs(adapter);
+ qlcnic_register_hwmon_dev(adapter);
+ return 0;
+
+err_out_disable_mbx_intr:
+ if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ qlcnic_cancel_idc_work(adapter);
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+err_out_free_hw:
+ qlcnic_free_adapter_resources(adapter);
+
+err_out_free_wq:
+ destroy_workqueue(adapter->qlcnic_wq);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(ahw);
+
+err_out_free_hw_res:
+ kfree(ahw);
+
+err_out_free_res:
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_disable_device(pdev);
+ return err;
+
+err_out_maintenance_mode:
+ set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
+ netdev->netdev_ops = &qlcnic_netdev_failed_ops;
+ netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
+ ahw->port_type = QLCNIC_XGBE;
+
+ if (qlcnic_83xx_check(adapter))
+ adapter->tgt_status_reg = NULL;
+ else
+ ahw->board_type = QLCNIC_BRDTYPE_P3P_10G_SFP_PLUS;
+
+ err = register_netdev(netdev);
+
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register net device\n");
+ qlcnic_clr_all_drv_state(adapter, 0);
+ goto err_out_free_hw;
+ }
+
+ pci_set_drvdata(pdev, adapter);
+ qlcnic_add_sysfs(adapter);
+
+ return 0;
+}
+
+static void qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+ struct qlcnic_hardware_context *ahw;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
+ ahw = adapter->ahw;
+
+ unregister_netdev(netdev);
+ qlcnic_sriov_cleanup(adapter);
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_initialize_nic(adapter, 0);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ kfree(ahw->fw_info);
+ }
+
+ qlcnic_dcb_free(adapter->dcb);
+ qlcnic_detach(adapter);
+ kfree(adapter->npars);
+ kfree(adapter->eswitch);
+
+ if (qlcnic_82xx_check(adapter))
+ qlcnic_clr_all_drv_state(adapter, 0);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_free_lb_filters_mem(adapter);
+
+ qlcnic_teardown_intr(adapter);
+
+ qlcnic_remove_sysfs(adapter);
+
+ qlcnic_unregister_hwmon_dev(adapter);
+
+ qlcnic_cleanup_pci_map(adapter->ahw);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_disable_pcie_error_reporting(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ if (adapter->qlcnic_wq) {
+ destroy_workqueue(adapter->qlcnic_wq);
+ adapter->qlcnic_wq = NULL;
+ }
+
+ qlcnic_free_adapter_resources(adapter);
+ kfree(ahw);
+ free_netdev(netdev);
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+static int __maybe_unused qlcnic_suspend(struct device *dev_d)
+{
+ return __qlcnic_shutdown(to_pci_dev(dev_d));
+}
+
+static int __maybe_unused qlcnic_resume(struct device *dev_d)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev_d);
+
+ return __qlcnic_resume(adapter);
+}
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(netdev, "%s: Device is in non-operational state\n",
+ __func__);
+
+ return -EIO;
+ }
+
+ netif_carrier_off(netdev);
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ qlcnic_detach(adapter);
+
+ return err;
+}
+
+/*
+ * qlcnic_close - entry point that disables the network interface
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+
+ return 0;
+}
+
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
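+/*
+ * Allocate the Tx and Rx MAC-learning filter hash tables; the filter
+ * budget is divided across the active NIC functions.
+ */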
+void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ void *head;
+ int i;
+ struct net_device *netdev = adapter->netdev;
+ u32 filter_size = 0;
+ u16 act_pci_func = 0;
+
+ if (adapter->fhash.fmax && adapter->fhash.fhead)
+ return;
+
+ act_pci_func = adapter->ahw->total_nic_func;
+ spin_lock_init(&adapter->mac_learn_lock);
+ spin_lock_init(&adapter->rx_mac_learn_lock);
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
+ filter_size = QLCNIC_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
+ } else {
+ filter_size = QLC_83XX_LB_MAX_FILTERS;
+ adapter->fhash.fbucket_size = QLC_83XX_LB_BUCKET_SIZE;
+ }
+
+ head = kcalloc(adapter->fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->fhash.fmax = (filter_size / act_pci_func);
+ adapter->fhash.fhead = head;
+
+ netdev_info(netdev, "active nic func = %d, mac filter size=%d\n",
+ act_pci_func, adapter->fhash.fmax);
+
+ for (i = 0; i < adapter->fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
+
+ adapter->rx_fhash.fbucket_size = adapter->fhash.fbucket_size;
+
+ head = kcalloc(adapter->rx_fhash.fbucket_size,
+ sizeof(struct hlist_head), GFP_ATOMIC);
+
+ if (!head)
+ return;
+
+ adapter->rx_fhash.fmax = (filter_size / act_pci_func);
+ adapter->rx_fhash.fhead = head;
+
+ for (i = 0; i < adapter->rx_fhash.fbucket_size; i++)
+ INIT_HLIST_HEAD(&adapter->rx_fhash.fhead[i]);
+}
+
+static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fmax)
+ kfree(adapter->fhash.fhead);
+
+ adapter->fhash.fhead = NULL;
+ adapter->fhash.fmax = 0;
+
+ if (adapter->rx_fhash.fmax)
+ kfree(adapter->rx_fhash.fhead);
+
+ adapter->rx_fhash.fmax = 0;
+ adapter->rx_fhash.fhead = NULL;
+}
+
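+/*
+ * Read the ASIC temperature register and return 1 when the panic
+ * threshold has been crossed so the caller can detach the device;
+ * warning and back-to-normal transitions are only logged.
+ */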
+int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_state, temp_val, temp = 0;
+ int rv = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ temp = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+
+ if (qlcnic_82xx_check(adapter))
+ temp = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->ahw->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->ahw->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->ahw->temp = temp_state;
+ return rv;
+}
+
+static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
+{
+ int i;
+
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ pr_info("TX Desc: %d\n", i);
+ print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ &tx_ring->desc_head[i],
+ sizeof(struct cmd_desc_type0), true);
+ }
+}
+
+static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ if (!netdev || !netif_running(netdev))
+ return;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ if (!rds_ring)
+ continue;
+ netdev_info(netdev,
+ "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n",
+ ring, readl(rds_ring->crb_rcv_producer),
+ rds_ring->producer, rds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ if (!sds_ring)
+ continue;
+ netdev_info(netdev,
+ "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n",
+ ring, readl(sds_ring->crb_sts_consumer),
+ sds_ring->consumer, readl(sds_ring->crb_intr_mask),
+ sds_ring->num_desc);
+ }
+
+ for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ if (!tx_ring)
+ continue;
+ netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
+ ring, tx_ring->ctx_id);
+ netdev_info(netdev,
+ "xmit_finished=%llu, xmit_called=%llu, xmit_on=%llu, xmit_off=%llu\n",
+ tx_ring->tx_stats.xmit_finished,
+ tx_ring->tx_stats.xmit_called,
+ tx_ring->tx_stats.xmit_on,
+ tx_ring->tx_stats.xmit_off);
+
+ if (tx_ring->crb_intr_mask)
+ netdev_info(netdev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+
+ netdev_info(netdev,
+ "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
+ readl(tx_ring->crb_cmd_producer),
+ tx_ring->producer, tx_ring->sw_consumer,
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+
+ netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
+ tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
+
+ if (netif_msg_tx_err(adapter->ahw))
+ dump_tx_ring_desc(tx_ring);
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ qlcnic_dump_rings(adapter);
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS ||
+ netif_msg_tx_err(adapter->ahw)) {
+ netdev_err(netdev, "Tx timeout, reset the adapter.\n");
+ if (qlcnic_82xx_check(adapter))
+ adapter->need_fw_reset = 1;
+ else if (qlcnic_83xx_check(adapter))
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ } else {
+ netdev_err(netdev, "Tx timeout, reset adapter context.\n");
+ adapter->ahw->reset_context = 1;
+ }
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_update_stats(adapter);
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->ahw->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->ahw->diag_cnt++;
+ qlcnic_enable_sds_intr(adapter, sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_tx_intr(int irq, void *data)
+{
+ struct qlcnic_host_tx_ring *tx_ring = data;
+
+ napi_schedule(&tx_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static void
+qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
+{
+ u32 val;
+
+ val = adapter->portnum & 0xf;
+ val |= encoding << 7;
+ val |= (jiffies - adapter->dev_rst_time) << 8;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
+ adapter->dev_rst_time = jiffies;
+}
+
+static int
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+
+ if (state == QLCNIC_DEV_NEED_RESET)
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+
+ if (failed) {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_FAILED);
+ dev_info(&adapter->pdev->dev,
+ "Device state set to Failed. Please Reboot\n");
+ } else if (!(val & 0x11111111))
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_COLD);
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+/* Grab the api lock before checking state */
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+ int act, state, active_mask;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+
+ if (adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ active_mask = (~(1 << (ahw->pci_func * 4)));
+ act = act & active_mask;
+ }
+
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
+static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
+{
+ u32 val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
+
+ if (val != QLCNIC_DRV_IDC_VER) {
+ dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
+ " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
+ }
+
+ return 0;
+}
+
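+/*
+ * IDC handshake that decides whether this function should load the
+ * firmware: returns 1 when it must start the firmware itself, 0 when
+ * the device is (or becomes) ready, and -1 on failure or timeout.
+ */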
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ u8 dev_init_timeo = adapter->dev_init_timeo;
+ u8 portnum = adapter->portnum;
+ u8 ret;
+
+ if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
+ return 1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+ if (!(val & (1 << (portnum * 4)))) {
+ QLC_DEV_SET_REF_CNT(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
+ }
+
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Device state = %u\n", prev_state);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_DRV_IDC_VER);
+ qlcnic_idc_debug_info(adapter, 0);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ ret = qlcnic_check_idc_ver(adapter);
+ qlcnic_api_unlock(adapter);
+ return ret;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_QSCNT_RDY(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ dev_err(&adapter->pdev->dev, "Device in failed state.\n");
+ qlcnic_api_unlock(adapter);
+ return -1;
+
+ case QLCNIC_DEV_INITIALIZING:
+ case QLCNIC_DEV_QUISCENT:
+ break;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ do {
+ msleep(1000);
+ prev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
+
+ if (!dev_init_timeo) {
+ dev_err(&adapter->pdev->dev,
+ "Waiting for device to initialize timeout\n");
+ return -1;
+ }
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_CLR_RST_QSCNT(val, portnum);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ ret = qlcnic_check_idc_ver(adapter);
+ qlcnic_api_unlock(adapter);
+
+ return ret;
+}
+
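+/*
+ * Delayed work driving firmware re-initialization: wait for reset acks
+ * from the other functions, take a firmware dump when this function
+ * owns the reset, then restart the firmware or reschedule itself.
+ */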
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ u32 dev_state = 0xf;
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err_ret;
+
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (dev_state == QLCNIC_DEV_QUISCENT ||
+ dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ qlcnic_api_unlock(adapter);
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
+ FW_POLL_DELAY * 2);
+ return;
+ }
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ qlcnic_api_unlock(adapter);
+ goto wait_npar;
+ }
+
+ if (dev_state == QLCNIC_DEV_INITIALIZING ||
+ dev_state == QLCNIC_DEV_READY) {
+ dev_info(&adapter->pdev->dev, "Detected state change from "
+ "DEV_NEED_RESET, skipping ack check\n");
+ goto skip_ack_check;
+ }
+
+ if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
+ dev_info(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
+ adapter->reset_ack_timeo);
+ goto skip_ack_check;
+ }
+
+ if (!qlcnic_check_drv_state(adapter)) {
+skip_ack_check:
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (dev_state == QLCNIC_DEV_NEED_RESET) {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ val = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DRV_STATE);
+ QLC_DEV_SET_RST_RDY(val, adapter->portnum);
+ QLC_SHARED_REG_WR32(adapter,
+ QLCNIC_CRB_DRV_STATE, val);
+ }
+
+ qlcnic_api_unlock(adapter);
+
+ rtnl_lock();
+ if (qlcnic_check_fw_dump_state(adapter) &&
+ (adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ QLCDB(adapter, DRV, "Take FW dump\n");
+ qlcnic_dump_fw(adapter);
+ adapter->flags |= QLCNIC_FW_HANG;
+ }
+ rtnl_unlock();
+
+ adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
+ if (!adapter->nic_ops->start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
+ return;
+ }
+ goto err_ret;
+ }
+
+ qlcnic_api_unlock(adapter);
+
+wait_npar:
+ dev_state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
+
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ adapter->fw_wait_cnt = 0;
+ return;
+ }
+ break;
+ case QLCNIC_DEV_FAILED:
+ break;
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
+ "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
+ netif_device_attach(adapter->netdev);
+ qlcnic_clr_all_drv_state(adapter, 0);
+}
+
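+/*
+ * Delayed work run when the device needs a reset or quiescent
+ * transition: bring the interface down, check the peg halt status and
+ * temperature, ack the new state unless this function owns the reset,
+ * and schedule qlcnic_fwinit_work.
+ */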
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ /* Don't grab the rtnl lock during quiescent mode */
+ if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ } else
+ qlcnic_down(adapter, netdev);
+
+ status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR) {
+ dev_err(&adapter->pdev->dev,
+ "Detaching the device: peg halt status1=0x%x\n",
+ status);
+
+ if (QLCNIC_FWERROR_CODE(status) == QLCNIC_FWERROR_FAN_FAILURE) {
+ dev_err(&adapter->pdev->dev,
+ "On board active cooling fan failed. "
+ "Device has been halted.\n");
+ dev_err(&adapter->pdev->dev,
+ "Replace the adapter.\n");
+ }
+
+ goto err_ret;
+ }
+
+ if (adapter->ahw->temp == QLCNIC_TEMP_PANIC) {
+ dev_err(&adapter->pdev->dev, "Detaching the device: temp=%d\n",
+ adapter->ahw->temp);
+ goto err_ret;
+ }
+
+ /* Don't ack if this instance is the reset owner */
+ if (!(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
+ if (qlcnic_set_drv_state(adapter, adapter->dev_state)) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set driver state,"
+ "detaching the device.\n");
+ goto err_ret;
+ }
+ }
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ netif_device_attach(netdev);
+ qlcnic_clr_all_drv_state(adapter, 1);
+}
+
+/* Transition NPAR state to non-operational */
+static void
+qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
+ if (state == QLCNIC_DEV_NPAR_NON_OPER)
+ return;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
+static void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *adapter,
+ u32 key)
+{
+ u32 state, xg_val = 0, gb_val = 0;
+
+ qlcnic_xg_set_xg0_mask(xg_val);
+ qlcnic_xg_set_xg1_mask(xg_val);
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, xg_val);
+ qlcnic_gb_set_gb0_mask(gb_val);
+ qlcnic_gb_set_gb1_mask(gb_val);
+ qlcnic_gb_set_gb2_mask(gb_val);
+ qlcnic_gb_set_gb3_mask(gb_val);
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, gb_val);
+ dev_info(&adapter->pdev->dev, "Pause control frames disabled"
+ " on all ports\n");
+ adapter->need_fw_reset = 1;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state)) {
+ netdev_err(adapter->netdev, "%s: Device is in non-operational state\n",
+ __func__);
+ qlcnic_api_unlock(adapter);
+
+ return;
+ }
+
+ if (state == QLCNIC_DEV_READY) {
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_NEED_RESET);
+ adapter->flags |= QLCNIC_FW_RESET_OWNER;
+ QLCDB(adapter, DRV, "NEED_RESET state set\n");
+ qlcnic_idc_debug_info(adapter, 0);
+ }
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_NON_OPER);
+ qlcnic_api_unlock(adapter);
+}
+
+/* Transition the NPAR state from non-operational to operational */
+static void
+qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
+ QLCNIC_DEV_NPAR_OPER);
+ QLCDB(adapter, DRV, "NPAR operational state set\n");
+
+ qlcnic_api_unlock(adapter);
+}
+
+void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ if (test_bit(__QLCNIC_AER, &adapter->state))
+ return;
+
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->fw_work,
+ round_jiffies_relative(delay));
+}
+
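+/*
+ * Delayed work that re-attaches the interface once firmware recovery
+ * completes; non-management functions first wait for the NPAR state to
+ * become operational.
+ */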
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 npar_state;
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ npar_state = QLC_SHARED_REG_RD32(adapter,
+ QLCNIC_CRB_DEV_NPAR_STATE);
+ if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
+ qlcnic_clr_all_drv_state(adapter, 0);
+ else if (npar_state != QLCNIC_DEV_NPAR_OPER)
+ qlcnic_schedule_work(adapter, qlcnic_attach_work,
+ FW_POLL_DELAY);
+ else
+ goto attach;
+ QLCDB(adapter, DRV, "Waiting for NPAR state to operational\n");
+ return;
+ }
+attach:
+ qlcnic_dcb_get_info(adapter->dcb);
+
+ if (netif_running(netdev)) {
+ if (qlcnic_up(adapter, netdev))
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+done:
+ netif_device_attach(netdev);
+ adapter->fw_fail_cnt = 0;
+ adapter->flags &= ~QLCNIC_FW_HANG;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
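+/*
+ * Called from the firmware poll work: monitor temperature, device state
+ * and the firmware heartbeat, and schedule detach/recovery work (or
+ * capture a dump) when a firmware hang is detected. Returns nonzero
+ * when recovery is needed and the poll work should not be rescheduled.
+ */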
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ u32 state = 0, heartbeat;
+ u32 peg_status;
+ int err = 0;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset)
+ qlcnic_dev_request_reset(adapter, 0);
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET) {
+ qlcnic_set_npar_non_operational(adapter);
+ adapter->need_fw_reset = 1;
+ } else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ goto detach;
+
+ heartbeat = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbeat != adapter->heartbeat) {
+ adapter->heartbeat = heartbeat;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+
+ if (ahw->reset_context && qlcnic_auto_fw_reset)
+ qlcnic_reset_hw_context(adapter);
+
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ adapter->flags |= QLCNIC_FW_HANG;
+
+ qlcnic_dev_request_reset(adapter, 0);
+
+ if (qlcnic_auto_fw_reset)
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_err(&adapter->pdev->dev, "firmware hang detected\n");
+ peg_status = QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+ dev_err(&adapter->pdev->dev, "Dumping hw/fw registers\n"
+ "PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
+ "PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
+ "PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
+ "PEG_NET_4_PC: 0x%x\n",
+ peg_status,
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
+ if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
+ dev_err(&adapter->pdev->dev,
+ "Firmware aborted with error code 0x00006700. "
+ "Device is being reset.\n");
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if (qlcnic_auto_fw_reset && !test_and_set_bit(__QLCNIC_RESETTING,
+ &adapter->state)) {
+
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+ QLCDB(adapter, DRV, "fw recovery scheduled.\n");
+ } else if (!qlcnic_auto_fw_reset && fw_dump->enable &&
+ adapter->flags & QLCNIC_FW_RESET_OWNER) {
+ qlcnic_dump_fw(adapter);
+ }
+
+ return 1;
+}
+
+void qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
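+/*
+ * Return 1 when every lower-numbered PCI function in this slot is in
+ * D3cold, i.e. this is the first function being brought back up.
+ */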
+static int qlcnic_is_first_func(struct pci_dev *pdev)
+{
+ struct pci_dev *oth_pdev;
+ int val = pdev->devfn;
+
+ while (val-- > 0) {
+ oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
+ (pdev->bus), pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
+ if (!oth_pdev)
+ continue;
+
+ if (oth_pdev->current_state != PCI_D3cold) {
+ pci_dev_put(oth_pdev);
+ return 0;
+ }
+ pci_dev_put(oth_pdev);
+ }
+ return 1;
+}
+
+static int qlcnic_attach_func(struct pci_dev *pdev)
+{
+ int err, first_func;
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ pdev->error_state = pci_channel_io_normal;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ first_func = qlcnic_is_first_func(pdev);
+
+ if (qlcnic_api_lock(adapter))
+ return -EINVAL;
+
+ if (adapter->ahw->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
+ adapter->need_fw_reset = 1;
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_CRB_DEV_STATE,
+ QLCNIC_DEV_INITIALIZING);
+ QLCDB(adapter, DRV, "Restarting fw\n");
+ }
+ qlcnic_api_unlock(adapter);
+
+ err = qlcnic_start_firmware(adapter);
+ if (err)
+ return err;
+
+ qlcnic_clr_drv_state(adapter);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ err = qlcnic_setup_intr(adapter);
+
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err) {
+ qlcnic_clr_all_drv_state(adapter, 1);
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ netif_device_attach(netdev);
+ return err;
+ }
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ done:
+ netif_device_attach(netdev);
+ return err;
+}
+
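+/*
+ * 82xx AER handlers: quiesce and detach the adapter when a PCI channel
+ * error is reported, and re-attach it on slot reset and resume.
+ */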
+static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ netif_device_detach(netdev);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+ qlcnic_teardown_intr(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
+{
+ pci_ers_result_t res;
+
+ rtnl_lock();
+ res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+ rtnl_unlock();
+
+ return res;
+}
+
+static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
+{
+ u32 state;
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
+ &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_error_detected) {
+ return hw_ops->io_error_detected(pdev, state);
+ } else {
+ dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_slot_reset) {
+ return hw_ops->io_slot_reset(pdev);
+ } else {
+ dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_resume)
+ hw_ops->io_resume(pdev);
+ else
+ dev_err(&pdev->dev, "AER resume handler not registered.\n");
+}
+
+static int
+qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_can_start_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_check_npar_opertional(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_initialize_nic(adapter);
+ if (err)
+ return err;
+
+ qlcnic_check_options(adapter);
+
+ err = qlcnic_set_eswitch_port_config(adapter);
+ if (err)
+ return err;
+
+ adapter->need_fw_reset = 0;
+
+ return err;
+}
+
+int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
+ int queue_type)
+{
+ struct net_device *netdev = adapter->netdev;
+ char buf[8];
+
+ if (queue_type == QLCNIC_RX_QUEUE)
+ strcpy(buf, "SDS");
+ else
+ strcpy(buf, "Tx");
+
+ if (!is_power_of_2(ring_cnt)) {
+ netdev_err(netdev, "%s rings value should be a power of 2\n",
+ buf);
+ return -EINVAL;
+ }
+
+ if (qlcnic_82xx_check(adapter) && (queue_type == QLCNIC_TX_QUEUE) &&
+ !qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi Tx queue support\n");
+ return -EINVAL;
+ }
+
+ if (ring_cnt > num_online_cpus()) {
+ netdev_err(netdev,
+ "%s value[%u] should not be higher than, number of online CPUs\n",
+ buf, num_online_cpus());
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
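+/*
+ * Reconfigure the number of Tx/SDS rings at runtime: bring the
+ * interface down, re-allocate interrupts for the new ring counts and
+ * bring it back up.
+ */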
+int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 tx_rings, rx_rings;
+ int err;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ tx_rings = adapter->drv_tss_rings;
+ rx_rings = adapter->drv_rss_rings;
+
+ netif_device_detach(netdev);
+
+ err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+ if (err)
+ goto done;
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_enable_mbx_poll(adapter);
+ }
+
+ qlcnic_teardown_intr(adapter);
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ kfree(adapter->msix_entries);
+ netdev_err(netdev, "failed to setup interrupt\n");
+ return err;
+ }
+
+ /* Check if we need to update real_num_{tx|rx}_queues because
+ * qlcnic_setup_intr() may change Tx/Rx rings size
+ */
+ if ((tx_rings != adapter->drv_tx_rings) ||
+ (rx_rings != adapter->drv_sds_rings)) {
+ err = qlcnic_set_real_num_queues(adapter,
+ adapter->drv_tx_rings,
+ adapter->drv_sds_rings);
+ if (err)
+ goto done;
+ }
+
+ if (qlcnic_83xx_check(adapter)) {
+ qlcnic_83xx_initialize_nic(adapter, 1);
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "failed to setup mbx interrupt\n");
+ goto done;
+ }
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+done:
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
+
+static void
+qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
+ struct net_device *dev, unsigned long event)
+{
+ const struct in_ifaddr *ifa;
+ struct in_device *indev;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ in_dev_for_each_ifa_rtnl(ifa, indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ }
+
+ in_dev_put(indev);
+}
+
+void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device *dev;
+ u16 vid;
+
+ qlcnic_config_indev_addr(adapter, netdev, event);
+
+ rcu_read_lock();
+ for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
+ dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
+ if (!dev)
+ continue;
+ qlcnic_config_indev_addr(adapter, dev, event);
+ }
+ rcu_read_unlock();
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (is_vlan_dev(dev)) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto done;
+
+ qlcnic_config_indev_addr(adapter, dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (is_vlan_dev(dev)) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+static const struct pci_error_handlers qlcnic_err_handler = {
+ .error_detected = qlcnic_io_error_detected,
+ .slot_reset = qlcnic_io_slot_reset,
+ .resume = qlcnic_io_resume,
+};
+
+static SIMPLE_DEV_PM_OPS(qlcnic_pm_ops, qlcnic_suspend, qlcnic_resume);
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = qlcnic_remove,
+ .driver.pm = &qlcnic_pm_ops,
+ .shutdown = qlcnic_shutdown,
+ .err_handler = &qlcnic_err_handler,
+#ifdef CONFIG_QLCNIC_SRIOV
+ .sriov_configure = qlcnic_pci_sriov_configure,
+#endif
+
+};
+
+static int __init qlcnic_init_module(void)
+{
+ int ret;
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+ ret = pci_register_driver(&qlcnic_driver);
+ if (ret) {
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+ }
+
+ return ret;
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
new file mode 100644
index 000000000..7ecb3dfe3
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <net/ip.h>
+
+#include "qlcnic.h"
+#include "qlcnic_hdr.h"
+#include "qlcnic_83xx_hw.h"
+#include "qlcnic_hw.h"
+
+#define QLC_83XX_MINIDUMP_FLASH 0x520000
+#define QLC_83XX_OCM_INDEX 3
+#define QLC_83XX_PCI_INDEX 0
+#define QLC_83XX_DMA_ENGINE_INDEX 8
+
+static const u32 qlcnic_ms_read_data[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
+};
+
+#define QLCNIC_DUMP_WCRB BIT_0
+#define QLCNIC_DUMP_RWCRB BIT_1
+#define QLCNIC_DUMP_ANDCRB BIT_2
+#define QLCNIC_DUMP_ORCRB BIT_3
+#define QLCNIC_DUMP_POLLCRB BIT_4
+#define QLCNIC_DUMP_RD_SAVE BIT_5
+#define QLCNIC_DUMP_WRT_SAVED BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
+#define QLCNIC_DUMP_SKIP BIT_7
+
+#define QLCNIC_DUMP_MASK_MAX 0xff
+
+struct qlcnic_pex_dma_descriptor {
+ u32 read_data_size;
+ u32 dma_desc_cmd;
+ u32 src_addr_low;
+ u32 src_addr_high;
+ u32 dma_bus_addr_low;
+ u32 dma_bus_addr_high;
+ u32 rsvd[6];
+} __packed;
+
+struct qlcnic_common_entry_hdr {
+ u32 type;
+ u32 offset;
+ u32 cap_size;
+#if defined(__LITTLE_ENDIAN)
+ u8 mask;
+ u8 rsvd[2];
+ u8 flags;
+#else
+ u8 flags;
+ u8 rsvd[2];
+ u8 mask;
+#endif
+} __packed;
+
+struct __crb {
+ u32 addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 stride;
+ u8 rsvd1[3];
+#else
+ u8 rsvd1[3];
+ u8 stride;
+#endif
+ u32 data_size;
+ u32 no_ops;
+ u32 rsvd2[4];
+} __packed;
+
+struct __ctrl {
+ u32 addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 stride;
+ u8 index_a;
+ u16 timeout;
+#else
+ u16 timeout;
+ u8 index_a;
+ u8 stride;
+#endif
+ u32 data_size;
+ u32 no_ops;
+#if defined(__LITTLE_ENDIAN)
+ u8 opcode;
+ u8 index_v;
+ u8 shl_val;
+ u8 shr_val;
+#else
+ u8 shr_val;
+ u8 shl_val;
+ u8 index_v;
+ u8 opcode;
+#endif
+ u32 val1;
+ u32 val2;
+ u32 val3;
+} __packed;
+
+struct __cache {
+ u32 addr;
+#if defined(__LITTLE_ENDIAN)
+ u16 stride;
+ u16 init_tag_val;
+#else
+ u16 init_tag_val;
+ u16 stride;
+#endif
+ u32 size;
+ u32 no_ops;
+ u32 ctrl_addr;
+ u32 ctrl_val;
+ u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 read_addr_stride;
+ u8 read_addr_num;
+ u8 rsvd1[2];
+#else
+ u8 rsvd1[2];
+ u8 read_addr_num;
+ u8 read_addr_stride;
+#endif
+} __packed;
+
+struct __ocm {
+ u8 rsvd[8];
+ u32 size;
+ u32 no_ops;
+ u8 rsvd1[8];
+ u32 read_addr;
+ u32 read_addr_stride;
+} __packed;
+
+struct __mem {
+ u32 desc_card_addr;
+ u32 dma_desc_cmd;
+ u32 start_dma_cmd;
+ u32 rsvd[3];
+ u32 addr;
+ u32 size;
+} __packed;
+
+struct __mux {
+ u32 addr;
+ u8 rsvd[4];
+ u32 size;
+ u32 no_ops;
+ u32 val;
+ u32 val_stride;
+ u32 read_addr;
+ u8 rsvd2[4];
+} __packed;
+
+struct __queue {
+ u32 sel_addr;
+#if defined(__LITTLE_ENDIAN)
+ u16 stride;
+ u8 rsvd[2];
+#else
+ u8 rsvd[2];
+ u16 stride;
+#endif
+ u32 size;
+ u32 no_ops;
+ u8 rsvd2[8];
+ u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u8 rsvd3[2];
+#else
+ u8 rsvd3[2];
+ u8 read_addr_cnt;
+ u8 read_addr_stride;
+#endif
+} __packed;
+
+struct __pollrd {
+ u32 sel_addr;
+ u32 read_addr;
+ u32 sel_val;
+#if defined(__LITTLE_ENDIAN)
+ u16 sel_val_stride;
+ u16 no_ops;
+#else
+ u16 no_ops;
+ u16 sel_val_stride;
+#endif
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 data_size;
+ u8 rsvd[4];
+} __packed;
+
+struct __mux2 {
+ u32 sel_addr1;
+ u32 sel_addr2;
+ u32 sel_val1;
+ u32 sel_val2;
+ u32 no_ops;
+ u32 sel_val_mask;
+ u32 read_addr;
+#if defined(__LITTLE_ENDIAN)
+ u8 sel_val_stride;
+ u8 data_size;
+ u8 rsvd[2];
+#else
+ u8 rsvd[2];
+ u8 data_size;
+ u8 sel_val_stride;
+#endif
+} __packed;
+
+struct __pollrdmwr {
+ u32 addr1;
+ u32 addr2;
+ u32 val1;
+ u32 val2;
+ u32 poll_wait;
+ u32 poll_mask;
+ u32 mod_mask;
+ u32 data_size;
+} __packed;
+
+struct qlcnic_dump_entry {
+ struct qlcnic_common_entry_hdr hdr;
+ union {
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ struct __pollrdmwr pollrdmwr;
+ struct __mux2 mux2;
+ struct __pollrd pollrd;
+ } region;
+} __packed;
+
+enum qlcnic_minidump_opcode {
+ QLCNIC_DUMP_NOP = 0,
+ QLCNIC_DUMP_READ_CRB = 1,
+ QLCNIC_DUMP_READ_MUX = 2,
+ QLCNIC_DUMP_QUEUE = 3,
+ QLCNIC_DUMP_BRD_CONFIG = 4,
+ QLCNIC_DUMP_READ_OCM = 6,
+ QLCNIC_DUMP_PEG_REG = 7,
+ QLCNIC_DUMP_L1_DTAG = 8,
+ QLCNIC_DUMP_L1_ITAG = 9,
+ QLCNIC_DUMP_L1_DATA = 11,
+ QLCNIC_DUMP_L1_INST = 12,
+ QLCNIC_DUMP_L2_DTAG = 21,
+ QLCNIC_DUMP_L2_ITAG = 22,
+ QLCNIC_DUMP_L2_DATA = 23,
+ QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_POLL_RD = 35,
+ QLCNIC_READ_MUX2 = 36,
+ QLCNIC_READ_POLLRDMWR = 37,
+ QLCNIC_DUMP_READ_ROM = 71,
+ QLCNIC_DUMP_READ_MEM = 72,
+ QLCNIC_DUMP_READ_CTRL = 98,
+ QLCNIC_DUMP_TLHDR = 99,
+ QLCNIC_DUMP_RDEND = 255
+};
+
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+ hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = tmpl_hdr;
+ hdr->drv_cap_mask = mask;
+}
+
+struct qlcnic_dump_operations {
+ enum qlcnic_minidump_opcode opcode;
+ u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
+ __le32 *);
+};
+
+static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 addr, data;
+ struct __crb *crb = &entry->region.crb;
+
+ addr = crb->addr;
+
+ for (i = 0; i < crb->no_ops; i++) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += crb->stride;
+ }
+ return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ struct __ctrl *ctr = &entry->region.ctrl;
+ int i, k, timeout = 0;
+ u32 addr, data, temp;
+ u8 no_ops;
+
+ addr = ctr->addr;
+ no_ops = ctr->no_ops;
+
+ for (i = 0; i < no_ops; i++) {
+ k = 0;
+ for (k = 0; k < 8; k++) {
+ if (!(ctr->opcode & (1 << k)))
+ continue;
+ switch (1 << k) {
+ case QLCNIC_DUMP_WCRB:
+ qlcnic_ind_wr(adapter, addr, ctr->val1);
+ break;
+ case QLCNIC_DUMP_RWCRB:
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr, data);
+ break;
+ case QLCNIC_DUMP_ANDCRB:
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data & ctr->val2));
+ break;
+ case QLCNIC_DUMP_ORCRB:
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_ind_wr(adapter, addr,
+ (data | ctr->val3));
+ break;
+ case QLCNIC_DUMP_POLLCRB:
+ while (timeout <= ctr->timeout) {
+ data = qlcnic_ind_rd(adapter, addr);
+ if ((data & ctr->val2) == ctr->val1)
+ break;
+ usleep_range(1000, 2000);
+ timeout++;
+ }
+ if (timeout > ctr->timeout) {
+ dev_info(&adapter->pdev->dev,
+ "Timed out, aborting poll CRB\n");
+ return -EINVAL;
+ }
+ break;
+ case QLCNIC_DUMP_RD_SAVE:
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ data = qlcnic_ind_rd(adapter, addr);
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
+ break;
+ case QLCNIC_DUMP_WRT_SAVED:
+ temp = ctr->index_v;
+ if (temp)
+ data = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ else
+ data = ctr->val1;
+
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
+ qlcnic_ind_wr(adapter, addr, data);
+ break;
+ case QLCNIC_DUMP_MOD_SAVE_ST:
+ data = qlcnic_get_saved_state(adapter, hdr,
+ ctr->index_v);
+ data <<= ctr->shl_val;
+ data >>= ctr->shr_val;
+ if (ctr->val2)
+ data &= ctr->val2;
+ data |= ctr->val3;
+ data += ctr->val1;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode\n");
+ break;
+ }
+ }
+ addr += ctr->stride;
+ }
+ return 0;
+}
+
+static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int loop;
+ u32 val, data = 0;
+ struct __mux *mux = &entry->region.mux;
+
+ val = mux->val;
+ for (loop = 0; loop < mux->no_ops; loop++) {
+ qlcnic_ind_wr(adapter, mux->addr, val);
+ data = qlcnic_ind_rd(adapter, mux->read_addr);
+ *buffer++ = cpu_to_le32(val);
+ *buffer++ = cpu_to_le32(data);
+ val += mux->val_stride;
+ }
+ return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, loop;
+ u32 cnt, addr, data, que_id = 0;
+ struct __queue *que = &entry->region.que;
+
+ addr = que->read_addr;
+ cnt = que->read_addr_cnt;
+
+ for (loop = 0; loop < que->no_ops; loop++) {
+ qlcnic_ind_wr(adapter, que->sel_addr, que_id);
+ addr = que->read_addr;
+ for (i = 0; i < cnt; i++) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += que->read_addr_stride;
+ }
+ que_id += que->stride;
+ }
+ return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 data;
+ void __iomem *addr;
+ struct __ocm *ocm = &entry->region.ocm;
+
+ addr = adapter->ahw->pci_base0 + ocm->read_addr;
+ for (i = 0; i < ocm->no_ops; i++) {
+ data = readl(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += ocm->read_addr_stride;
+ }
+ return ocm->no_ops * sizeof(u32);
+}
+
+static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i, count = 0;
+ u32 fl_addr, size, val, lck_val, addr;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+lock_try:
+ lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ usleep_range(10000, 11000);
+ count++;
+ goto lock_try;
+ }
+ QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
+ adapter->ahw->pci_func);
+ for (i = 0; i < size; i++) {
+ addr = fl_addr & 0xFFFF0000;
+ qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
+ addr = LSW(fl_addr) + FLASH_ROM_DATA;
+ val = qlcnic_ind_rd(adapter, addr);
+ fl_addr += 4;
+ *buffer++ = cpu_to_le32(val);
+ }
+ QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
+ return rom->size;
+}
+
+static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ struct __cache *l1 = &entry->region.cache;
+
+ val = l1->init_tag_val;
+
+ for (i = 0; i < l1->no_ops; i++) {
+ qlcnic_ind_wr(adapter, l1->addr, val);
+ qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
+ addr = l1->read_addr;
+ cnt = l1->read_addr_num;
+ while (cnt) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += l1->read_addr_stride;
+ cnt--;
+ }
+ val += l1->stride;
+ }
+ return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ u8 poll_mask, poll_to, time_out = 0;
+ struct __cache *l2 = &entry->region.cache;
+
+ val = l2->init_tag_val;
+ poll_mask = LSB(MSW(l2->ctrl_val));
+ poll_to = MSB(MSW(l2->ctrl_val));
+
+ for (i = 0; i < l2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, l2->addr, val);
+ if (LSW(l2->ctrl_val))
+ qlcnic_ind_wr(adapter, l2->ctrl_addr,
+ LSW(l2->ctrl_val));
+ if (!poll_mask)
+ goto skip_poll;
+ do {
+ data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
+ if (!(data & poll_mask))
+ break;
+ usleep_range(1000, 2000);
+ time_out++;
+ } while (time_out <= poll_to);
+
+ if (time_out > poll_to) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return -EINVAL;
+ }
+skip_poll:
+ addr = l2->read_addr;
+ cnt = l2->read_addr_num;
+ while (cnt) {
+ data = qlcnic_ind_rd(adapter, addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += l2->read_addr_stride;
+ cnt--;
+ }
+ val += l2->stride;
+ }
+ return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
+ struct __mem *mem, __le32 *buffer,
+ int *ret)
+{
+ u32 addr, data, test;
+ int i, reg_read;
+
+ reg_read = mem->size;
+ addr = mem->addr;
+ /* the size must be a multiple of 16 and the address 16-byte aligned */
+ if ((addr & 0xf) || (reg_read%16)) {
+ dev_info(&adapter->pdev->dev,
+ "Unaligned memory addr:0x%x size:0x%x\n",
+ addr, reg_read);
+ *ret = -EINVAL;
+ return 0;
+ }
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ while (reg_read != 0) {
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
+
+ for (i = 0; i < MAX_CTL_CHECK; i++) {
+ test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
+ if (!(test & TA_CTL_BUSY))
+ break;
+ }
+ if (i == MAX_CTL_CHECK) {
+ dev_err_ratelimited(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ *ret = -EIO;
+ goto out;
+ }
+ for (i = 0; i < 4; i++) {
+ data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
+ *buffer++ = cpu_to_le32(data);
+ }
+ addr += 16;
+ reg_read -= 16;
+ cond_resched();
+ }
+out:
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return mem->size;
+}
+
+/* DMA register base address */
+#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))
+
+/* DMA register offsets w.r.t base address */
+#define QLC_DMA_CMD_BUFF_ADDR_LOW 0
+#define QLC_DMA_CMD_BUFF_ADDR_HI 4
+#define QLC_DMA_CMD_STATUS_CTRL 8
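+/*
+ * Illustrative example (the engine number is hypothetical): DMA engine 3
+ * maps to base 0x77320000 + (3 * 0x10000) = 0x77350000, so its command
+ * buffer address registers are at 0x77350000/0x77350004 and its
+ * status/control register at 0x77350008.
+ */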
+
+static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
+ struct __mem *mem)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 dma_no, dma_base_addr, temp_addr;
+ int i, ret, dma_sts;
+ void *tmpl_hdr;
+
+ tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
+ dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
+ if (ret)
+ return ret;
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
+ if (ret)
+ return ret;
+
+ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
+ if (ret)
+ return ret;
+
+ /* Wait for DMA to complete */
+ temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
+ for (i = 0; i < 400; i++) {
+ dma_sts = qlcnic_ind_rd(adapter, temp_addr);
+
+ if (dma_sts & BIT_1)
+ usleep_range(250, 500);
+ else
+ break;
+ }
+
+ if (i >= 400) {
+ dev_info(dev, "PEX DMA operation timed out\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
+ struct __mem *mem,
+ __le32 *buffer, int *ret)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 temp, dma_base_addr, size = 0, read_size = 0;
+ struct qlcnic_pex_dma_descriptor *dma_descr;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t dma_phys_addr;
+ void *dma_buffer;
+ void *tmpl_hdr;
+
+ tmpl_hdr = fw_dump->tmpl_hdr;
+
+ /* Check if DMA engine is available */
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
+ dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
+ temp = qlcnic_ind_rd(adapter,
+ dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
+
+ if (!(temp & BIT_31)) {
+ dev_info(dev, "%s: DMA engine is not available\n", __func__);
+ *ret = -EIO;
+ return 0;
+ }
+
+ /* Create DMA descriptor */
+ dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
+ GFP_KERNEL);
+ if (!dma_descr) {
+ *ret = -ENOMEM;
+ return 0;
+ }
+
+ /* dma_desc_cmd 0:15 = 0
+ * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
+ * dma_desc_cmd 20:23 = pci function number
+ * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
+ */
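+ /* Illustrative example (values are hypothetical): with
+ * mem->dma_desc_cmd = 0x0a05 and pci_func = 3, temp becomes
+ * (0x0a05 & 0xff0f) | (3 << 4) = 0x0a35 and the programmed command
+ * word is 0x0a350000.
+ */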
+ dma_phys_addr = fw_dump->phys_addr;
+ dma_buffer = fw_dump->dma_buffer;
+ temp = 0;
+ temp = mem->dma_desc_cmd & 0xff0f;
+ temp |= (adapter->ahw->pci_func & 0xf) << 4;
+ dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
+ dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
+ dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
+ dma_descr->src_addr_high = 0;
+
+ /* Collect memory dump using multiple DMA operations if required */
+ while (read_size < mem->size) {
+ if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
+ size = QLC_PEX_DMA_READ_SIZE;
+ else
+ size = mem->size - read_size;
+
+ dma_descr->src_addr_low = mem->addr + read_size;
+ dma_descr->read_data_size = size;
+
+ /* Write DMA descriptor to MS memory */
+ temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
+ if (*ret) {
+ dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
+ mem->desc_card_addr);
+ goto free_dma_descr;
+ }
+
+ *ret = qlcnic_start_pex_dma(adapter, mem);
+ if (*ret) {
+ dev_info(dev, "Failed to start PEX DMA operation\n");
+ goto free_dma_descr;
+ }
+
+ memcpy(buffer, dma_buffer, size);
+ buffer += size / 4;
+ read_size += size;
+ }
+
+free_dma_descr:
+ kfree(dma_descr);
+
+ return read_size;
+}
+
+static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct device *dev = &adapter->pdev->dev;
+ struct __mem *mem = &entry->region.mem;
+ u32 data_size;
+ int ret = 0;
+
+ if (fw_dump->use_pex_dma) {
+ data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
+ &ret);
+ if (ret)
+ dev_info(dev,
+ "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
+ entry->hdr.mask);
+ else
+ return data_size;
+ }
+
+ data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
+ if (ret) {
+ dev_info(dev,
+ "Failed to read memory dump using test agent method: mask[0x%x]\n",
+ entry->hdr.mask);
+ return 0;
+ } else {
+ return data_size;
+ }
+}
+
+static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ return 0;
+}
+
+static int qlcnic_valid_dump_entry(struct device *dev,
+ struct qlcnic_dump_entry *entry, u32 size)
+{
+ int ret = 1;
+ if (size != entry->hdr.cap_size) {
+ dev_err(dev,
+ "Invalid entry, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size,
+ entry->hdr.cap_size);
+ ret = 0;
+ }
+ return ret;
+}
+
+static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry,
+ __le32 *buffer)
+{
+ struct __pollrdmwr *poll = &entry->region.pollrdmwr;
+ u32 data, wait_count, poll_wait, temp;
+
+ poll_wait = poll->poll_wait;
+
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((data & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
+ qlcnic_ind_wr(adapter, poll->addr2, data);
+ qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
+ wait_count = 0;
+
+ while (wait_count < poll_wait) {
+ temp = qlcnic_ind_rd(adapter, poll->addr1);
+ if ((temp & poll->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ *buffer++ = cpu_to_le32(poll->addr2);
+ *buffer++ = cpu_to_le32(data);
+
+ return 2 * sizeof(u32);
+
+}
+
+static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __pollrd *pollrd = &entry->region.pollrd;
+ u32 data, wait_count, poll_wait, sel_val;
+ int i;
+
+ poll_wait = pollrd->poll_wait;
+ sel_val = pollrd->sel_val;
+
+ for (i = 0; i < pollrd->no_ops; i++) {
+ qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
+ wait_count = 0;
+ while (wait_count < poll_wait) {
+ data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
+ if ((data & pollrd->poll_mask) != 0)
+ break;
+ wait_count++;
+ }
+
+ if (wait_count == poll_wait) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout exceeded in %s, aborting dump\n",
+ __func__);
+ return 0;
+ }
+
+ data = qlcnic_ind_rd(adapter, pollrd->read_addr);
+ *buffer++ = cpu_to_le32(sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val += pollrd->sel_val_stride;
+ }
+ return pollrd->no_ops * (2 * sizeof(u32));
+}
+
+static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ struct __mux2 *mux2 = &entry->region.mux2;
+ u32 data;
+ u32 t_sel_val, sel_val1, sel_val2;
+ int i;
+
+ sel_val1 = mux2->sel_val1;
+ sel_val2 = mux2->sel_val2;
+
+ for (i = 0; i < mux2->no_ops; i++) {
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & mux2->sel_val_mask;
+ qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
+ data = qlcnic_ind_rd(adapter, mux2->read_addr);
+ *buffer++ = cpu_to_le32(t_sel_val);
+ *buffer++ = cpu_to_le32(data);
+ sel_val1 += mux2->sel_val_stride;
+ sel_val2 += mux2->sel_val_stride;
+ }
+
+ return mux2->no_ops * (4 * sizeof(u32));
+}
+
+static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, __le32 *buffer)
+{
+ u32 fl_addr, size;
+ struct __mem *rom = &entry->region.mem;
+
+ fl_addr = rom->addr;
+ size = rom->size / 4;
+
+ if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
+ (u8 *)buffer, size))
+ return rom->size;
+
+ return 0;
+}
+
+static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
+ {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
+ {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
+ {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
+ {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
+ {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
+ {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
+ {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
+ {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
+ {QLCNIC_READ_MUX2, qlcnic_read_mux2},
+ {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
+ {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
+ {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
+ {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
+ {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
+ {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
+};
+
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
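+
+/*
+ * Illustrative example (the words are hypothetical): for the two words
+ * 0xFFFFFFFE and 0x00000001 the running sum is 0xFFFFFFFF, no carry folding
+ * is needed, and the returned one's complement truncates to 0, which
+ * qlcnic_fw_cmd_get_minidump_temp() treats as a valid template header.
+ */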
+
+static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u8 *buffer, u32 size)
+{
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ buffer, size / sizeof(u32));
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return ret;
+}
+
+static int
+qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(tmp_hdr) / sizeof(u32);
+ int ret = 0;
+
+ if (qlcnic_82xx_check(adapter))
+ return -EIO;
+
+ if (qlcnic_83xx_lock_flash(adapter))
+ return -EIO;
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter,
+ QLC_83XX_MINIDUMP_FLASH,
+ (u8 *)&tmp_hdr, size);
+
+ qlcnic_83xx_unlock_flash(adapter);
+
+ cmd->rsp.arg[2] = tmp_hdr.size;
+ cmd->rsp.arg[3] = tmp_hdr.version;
+
+ return ret;
+}
+
+static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
+ u32 *version, u32 *temp_size,
+ u8 *use_flash_temp)
+{
+ int err = 0;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
+ return -ENOMEM;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
+ qlcnic_free_mbx_args(&cmd);
+ return -EIO;
+ }
+ *use_flash_temp = 1;
+ }
+
+ *temp_size = cmd.rsp.arg[2];
+ *version = cmd.rsp.arg[3];
+ qlcnic_free_mbx_args(&cmd);
+
+ if (!(*temp_size))
+ return -EIO;
+
+ return 0;
+}
+
+static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
+ u32 *buffer, u32 temp_size)
+{
+ int err = 0, i;
+ void *tmp_addr;
+ __le32 *tmp_buf;
+ struct qlcnic_cmd_args cmd;
+ dma_addr_t tmp_addr_t = 0;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr)
+ return -ENOMEM;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
+ err = -ENOMEM;
+ goto free_mem;
+ }
+
+ cmd.req.arg[1] = LSD(tmp_addr_t);
+ cmd.req.arg[2] = MSD(tmp_addr_t);
+ cmd.req.arg[3] = temp_size;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ tmp_buf = tmp_addr;
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ for (i = 0; i < temp_size / sizeof(u32); i++)
+ *buffer++ = __le32_to_cpu(*tmp_buf++);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+free_mem:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+
+ return err;
+}
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_fw_dump *fw_dump;
+ u32 version, csum, *tmp_buf;
+ u8 use_flash_temp = 0;
+ u32 temp_size = 0;
+ void *temp_buffer;
+ int err;
+
+ ahw = adapter->ahw;
+ fw_dump = &ahw->fw_dump;
+ err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
+ &use_flash_temp);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get template size %d\n", err);
+ return -EIO;
+ }
+
+ fw_dump->tmpl_hdr = vzalloc(temp_size);
+ if (!fw_dump->tmpl_hdr)
+ return -ENOMEM;
+
+ tmp_buf = (u32 *)fw_dump->tmpl_hdr;
+ if (use_flash_temp)
+ goto flash_temp;
+
+ err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);
+
+ if (err) {
+flash_temp:
+ err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
+ temp_size);
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get minidump template header %d\n",
+ err);
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ return -EIO;
+ }
+ }
+
+ csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);
+
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ return -EIO;
+ }
+
+ qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
+ if (fw_dump->use_pex_dma) {
+ fw_dump->dma_buffer = NULL;
+ temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ QLC_PEX_DMA_READ_SIZE,
+ &fw_dump->phys_addr,
+ GFP_KERNEL);
+ if (!temp_buffer)
+ fw_dump->use_pex_dma = false;
+ else
+ fw_dump->dma_buffer = temp_buffer;
+ }
+
+ dev_info(&adapter->pdev->dev,
+ "Default minidump capture mask 0x%x\n",
+ fw_dump->cap_mask);
+
+ qlcnic_enable_fw_dump_state(adapter);
+
+ return 0;
+}
+
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ int i, k, ops_cnt, ops_index, dump_size = 0;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_entry *entry;
+ void *tmpl_hdr;
+ u32 ocm_window;
+ __le32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
+
+ ahw = adapter->ahw;
+ tmpl_hdr = fw_dump->tmpl_hdr;
+
+ /* Return if we don't have firmware dump template header */
+ if (!tmpl_hdr)
+ return -EIO;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ dev_info(&adapter->pdev->dev, "Dump not enabled\n");
+ return -EIO;
+ }
+
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not capturing dump\n");
+ return -EIO;
+ }
+
+ netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
+ /* Calculate the size for dump data area only */
+ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+ if (i & fw_dump->cap_mask)
+ dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
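+ /* Example (the mask value is hypothetical): with cap_mask = 0x1f the
+ * loop visits bits 0x02, 0x04, 0x08 and 0x10 and adds cap sizes 1-4;
+ * bit 0x01 is never tested because i starts at 2.
+ */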
+
+ if (!dump_size)
+ return -EIO;
+
+ fw_dump->data = vzalloc(dump_size);
+ if (!fw_dump->data)
+ return -ENOMEM;
+
+ buffer = fw_dump->data;
+ fw_dump->size = dump_size;
+ no_entries = fw_dump->num_entries;
+ entry_offset = fw_dump->offset;
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
+
+ if (qlcnic_82xx_check(adapter)) {
+ ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
+ fw_dump_ops = qlcnic_fw_dump_ops;
+ } else {
+ hdr_83xx = tmpl_hdr;
+ ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
+ fw_dump_ops = qlcnic_83xx_fw_dump_ops;
+ ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+ hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ }
+
+ for (i = 0; i < no_entries; i++) {
+ entry = tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & fw_dump->cap_mask)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
+ /* Find the handler for this entry */
+ ops_index = 0;
+ while (ops_index < ops_cnt) {
+ if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+ break;
+ ops_index++;
+ }
+
+ if (ops_index == ops_cnt) {
+ dev_info(dev, "Skipping unknown entry opcode %d\n",
+ entry->hdr.type);
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
+ /* Collect dump for this entry */
+ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+ if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+
+ buf_offset += entry->hdr.cap_size;
+ entry_offset += entry->hdr.offset;
+ buffer = fw_dump->data + buf_offset;
+ cond_resched();
+ }
+
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
+ netdev_info(adapter->netdev,
+ "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
+ fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
+ fw_dump->tmpl_hdr);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
+
+ return 0;
+}
+
+static inline bool
+qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
+{
+ /* For special adapters (with the 0x8830 device ID), where the iSCSI
+ * firmware dump needs to be captured as part of the regular firmware
+ * dump collection process, the firmware exports this capability
+ * through its capability registers.
+ */
+ return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
+ (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
+}
+
+void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
+{
+ u32 prev_version, current_version;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
+ struct pci_dev *pdev = adapter->pdev;
+ bool extended = false;
+ int ret;
+
+ prev_version = adapter->fw_version;
+ current_version = qlcnic_83xx_get_fw_version(adapter);
+
+ if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+
+ if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
+ extended = !qlcnic_83xx_extend_md_capab(adapter);
+
+ ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+ if (ret)
+ return;
+
+ dev_info(&pdev->dev, "Supports FW dump capability\n");
+
+ /* Once we have the minidump template with extended iSCSI dump
+ * capability, update the minidump capture mask to 0x1f as per
+ * the FW requirement
+ */
+ if (extended) {
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ if (!hdr)
+ return;
+ hdr->drv_cap_mask = 0x1f;
+ fw_dump->cap_mask = 0x1f;
+ dev_info(&pdev->dev,
+ "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
+ }
+ }
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 000000000..c42b99cd5
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#ifndef _QLCNIC_83XX_SRIOV_H_
+#define _QLCNIC_83XX_SRIOV_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "qlcnic.h"
+
+extern const u32 qlcnic_83xx_reg_tbl[];
+extern const u32 qlcnic_83xx_ext_reg_tbl[];
+
+struct qlcnic_bc_payload {
+ u64 payload[126];
+};
+
+struct qlcnic_bc_hdr {
+#if defined(__LITTLE_ENDIAN)
+ u8 version;
+ u8 msg_type:4;
+ u8 rsvd1:3;
+ u8 op_type:1;
+ u8 num_cmds;
+ u8 num_frags;
+ u8 frag_num;
+ u8 cmd_op;
+ u16 seq_id;
+ u64 rsvd3;
+#elif defined(__BIG_ENDIAN)
+ u8 num_frags;
+ u8 num_cmds;
+ u8 op_type:1;
+ u8 rsvd1:3;
+ u8 msg_type:4;
+ u8 version;
+ u16 seq_id;
+ u8 cmd_op;
+ u8 frag_num;
+ u64 rsvd3;
+#endif
+};
+
+enum qlcnic_bc_commands {
+ QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
+ QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
+ QLCNIC_BC_CMD_GET_ACL = 0x2,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
+};
+
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
+#define QLC_BC_CMD 1
+
+struct qlcnic_trans_list {
+ /* Lock for manipulating list */
+ spinlock_t lock;
+ struct list_head wait_list;
+ int count;
+};
+
+enum qlcnic_trans_state {
+ QLC_INIT = 0,
+ QLC_WAIT_FOR_CHANNEL_FREE,
+ QLC_WAIT_FOR_RESP,
+ QLC_ABORT,
+ QLC_END,
+};
+
+struct qlcnic_bc_trans {
+ u8 func_id;
+ u8 active;
+ u8 curr_rsp_frag;
+ u8 curr_req_frag;
+ u16 cmd_id;
+ u16 req_pay_size;
+ u16 rsp_pay_size;
+ u32 trans_id;
+ enum qlcnic_trans_state trans_state;
+ struct list_head list;
+ struct qlcnic_bc_hdr *req_hdr;
+ struct qlcnic_bc_hdr *rsp_hdr;
+ struct qlcnic_bc_payload *req_pay;
+ struct qlcnic_bc_payload *rsp_pay;
+ struct completion resp_cmpl;
+ struct qlcnic_vf_info *vf;
+};
+
+enum qlcnic_vf_state {
+ QLC_BC_VF_SEND = 0,
+ QLC_BC_VF_RECV,
+ QLC_BC_VF_CHANNEL,
+ QLC_BC_VF_STATE,
+ QLC_BC_VF_FLR,
+ QLC_BC_VF_SOFT_FLR,
+};
+
+enum qlcnic_vlan_mode {
+ QLC_NO_VLAN_MODE = 0,
+ QLC_PVID_MODE,
+ QLC_GUEST_VLAN_MODE,
+};
+
+struct qlcnic_resources {
+ u16 num_tx_mac_filters;
+ u16 num_rx_ucast_mac_filters;
+ u16 num_rx_mcast_mac_filters;
+
+ u16 num_txvlan_keys;
+
+ u16 num_rx_queues;
+ u16 num_tx_queues;
+
+ u16 num_rx_buf_rings;
+ u16 num_rx_status_rings;
+
+ u16 num_destip;
+ u32 num_lro_flows_supported;
+ u16 max_local_ipv6_addrs;
+ u16 max_remote_ipv6_addrs;
+};
+
+struct qlcnic_vport {
+ u16 handle;
+ u16 max_tx_bw;
+ u16 min_tx_bw;
+ u16 pvid;
+ u8 vlan_mode;
+ u8 qos;
+ bool spoofchk;
+ u8 mac[6];
+};
+
+struct qlcnic_vf_info {
+ u8 pci_func;
+ u16 rx_ctx_id;
+ u16 tx_ctx_id;
+ u16 *sriov_vlans;
+ int num_vlan;
+ unsigned long state;
+ struct completion ch_free_cmpl;
+ struct work_struct trans_work;
+ struct work_struct flr_work;
+ /* It synchronizes commands sent from VF */
+ struct mutex send_cmd_lock;
+ struct qlcnic_bc_trans *send_cmd;
+ struct qlcnic_bc_trans *flr_trans;
+ struct qlcnic_trans_list rcv_act;
+ struct qlcnic_trans_list rcv_pend;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_vport *vp;
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
+};
+
+struct qlcnic_async_cmd {
+ struct list_head list;
+ struct qlcnic_cmd_args *cmd;
+};
+
+struct qlcnic_back_channel {
+ u16 trans_counter;
+ struct workqueue_struct *bc_trans_wq;
+ struct workqueue_struct *bc_async_wq;
+ struct workqueue_struct *bc_flr_wq;
+ struct qlcnic_adapter *adapter;
+ struct list_head async_cmd_list;
+ struct work_struct vf_async_work;
+ spinlock_t queue_lock; /* async_cmd_list queue lock */
+};
+
+struct qlcnic_sriov {
+ u16 vp_handle;
+ u8 num_vfs;
+ u8 any_vlan;
+ u8 vlan_mode;
+ u16 num_allowed_vlans;
+ u16 *allowed_vlans;
+ u16 vlan;
+ struct qlcnic_resources ff_max;
+ struct qlcnic_back_channel bc;
+ struct qlcnic_vf_info *vf_info;
+};
+
+int qlcnic_sriov_init(struct qlcnic_adapter *, int);
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *);
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *);
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *, struct qlcnic_vf_info *,
+ struct qlcnic_bc_trans *);
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *,
+ struct qlcnic_info *, u16);
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *, u16, u8);
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *);
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *);
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *);
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *,
+ struct qlcnic_vf_info *, u16);
+
+static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
+}
+
+#ifdef CONFIG_QLCNIC_SRIOV
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_cmd_args *);
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
+int qlcnic_pci_sriov_configure(struct pci_dev *, int);
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *, struct qlcnic_vf_info *);
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
+ struct qlcnic_bc_trans *,
+ struct qlcnic_vf_info *);
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
+int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
+int qlcnic_sriov_get_vf_config(struct net_device *, int,
+ struct ifla_vf_info *);
+int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8, __be16);
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *, int, bool);
+#else
+static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
+static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
+static inline void
+qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id) {}
+static inline void
+qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void
+qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
+{}
+static inline void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf) {}
+static inline bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{ return false; }
+static inline void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter) {}
+static inline int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 000000000..f9dd50152
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,2229 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/types.h>
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+#include "qlcnic_83xx_hw.h"
+
+#define QLC_BC_COMMAND 0
+#define QLC_BC_RESPONSE 1
+
+#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
+#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
+
+#define QLC_BC_MSG 0
+#define QLC_BC_CFREE 1
+#define QLC_BC_FLR 2
+#define QLC_BC_HDR_SZ 16
+#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
+
+#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
+#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
+
+#define QLC_83XX_VF_RESET_FAIL_THRESH 8
+#define QLC_BC_CMD_MAX_RETRY_CNT 5
+
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
+
+static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
+ .read_crb = qlcnic_83xx_read_crb,
+ .write_crb = qlcnic_83xx_write_crb,
+ .read_reg = qlcnic_83xx_rd_reg_indirect,
+ .write_reg = qlcnic_83xx_wrt_reg_indirect,
+ .get_mac_address = qlcnic_83xx_get_mac_address,
+ .setup_intr = qlcnic_83xx_setup_intr,
+ .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
+ .mbx_cmd = qlcnic_sriov_issue_cmd,
+ .get_func_no = qlcnic_83xx_get_func_no,
+ .api_lock = qlcnic_83xx_cam_lock,
+ .api_unlock = qlcnic_83xx_cam_unlock,
+ .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
+ .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
+ .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
+ .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
+ .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
+ .setup_link_event = qlcnic_83xx_setup_link_event,
+ .get_nic_info = qlcnic_83xx_get_nic_info,
+ .get_pci_info = qlcnic_83xx_get_pci_info,
+ .set_nic_info = qlcnic_83xx_set_nic_info,
+ .change_macvlan = qlcnic_83xx_sre_macaddr_change,
+ .napi_enable = qlcnic_83xx_napi_enable,
+ .napi_disable = qlcnic_83xx_napi_disable,
+ .config_intr_coal = qlcnic_83xx_config_intr_coal,
+ .config_rss = qlcnic_83xx_config_rss,
+ .config_hw_lro = qlcnic_83xx_config_hw_lro,
+ .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
+ .change_l2_filter = qlcnic_83xx_change_l2_filter,
+ .get_board_info = qlcnic_83xx_get_port_info,
+ .free_mac_list = qlcnic_sriov_vf_free_mac_list,
+ .enable_sds_intr = qlcnic_83xx_enable_sds_intr,
+ .disable_sds_intr = qlcnic_83xx_disable_sds_intr,
+ .encap_rx_offload = qlcnic_83xx_encap_rx_offload,
+ .encap_tx_offload = qlcnic_83xx_encap_tx_offload,
+};
+
+static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
+ .config_bridged_mode = qlcnic_config_bridged_mode,
+ .config_led = qlcnic_config_led,
+ .cancel_idc_work = qlcnic_sriov_vf_cancel_fw_work,
+ .napi_add = qlcnic_83xx_napi_add,
+ .napi_del = qlcnic_83xx_napi_del,
+ .shutdown = qlcnic_sriov_vf_shutdown,
+ .resume = qlcnic_sriov_vf_resume,
+ .config_ipaddr = qlcnic_83xx_config_ipaddr,
+ .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
+};
+
+static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
+ {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
+ {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
+ {QLCNIC_BC_CMD_GET_ACL, 3, 14},
+ {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
+};
+
+static inline bool qlcnic_sriov_bc_msg_check(u32 val)
+{
+ return (val & (1 << QLC_BC_MSG)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_channel_free_check(u32 val)
+{
+ return (val & (1 << QLC_BC_CFREE)) ? true : false;
+}
+
+static inline bool qlcnic_sriov_flr_check(u32 val)
+{
+ return (val & (1 << QLC_BC_FLR)) ? true : false;
+}
+
+static inline u8 qlcnic_sriov_target_func_id(u32 val)
+{
+ return (val >> 4) & 0xff;
+}
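+
+/*
+ * The helpers above decode the 32-bit back-channel event word: bit 0
+ * (QLC_BC_MSG) flags a message, bit 1 (QLC_BC_CFREE) a channel-free
+ * notification, bit 2 (QLC_BC_FLR) an FLR event, and bits 4:11 carry the
+ * target PCI function id.
+ */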
+
+static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
+{
+ struct pci_dev *dev = adapter->pdev;
+ int pos;
+ u16 stride, offset;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos)
+ return 0;
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
+ pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
+
+ return (dev->devfn + offset + stride * vf_id) & 0xff;
+}
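+
+/*
+ * Illustrative example (offset and stride are hypothetical): with the PF at
+ * devfn 0, a VF offset of 8 and a VF stride of 1, VF 2 gets routing ID
+ * (0 + 8 + 1 * 2) & 0xff = 10.
+ */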
+
+int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct qlcnic_sriov *sriov;
+ struct qlcnic_back_channel *bc;
+ struct workqueue_struct *wq;
+ struct qlcnic_vport *vp;
+ struct qlcnic_vf_info *vf;
+ int err, i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return -EIO;
+
+ sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
+ if (!sriov)
+ return -ENOMEM;
+
+ adapter->ahw->sriov = sriov;
+ sriov->num_vfs = num_vfs;
+ bc = &sriov->bc;
+ sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
+ GFP_KERNEL);
+ if (!sriov->vf_info) {
+ err = -ENOMEM;
+ goto qlcnic_free_sriov;
+ }
+
+ wq = create_singlethread_workqueue("bc-trans");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Cannot create bc-trans workqueue\n");
+ goto qlcnic_free_vf_info;
+ }
+
+ bc->bc_trans_wq = wq;
+
+ wq = create_singlethread_workqueue("async");
+ if (wq == NULL) {
+ err = -ENOMEM;
+ dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
+ goto qlcnic_destroy_trans_wq;
+ }
+
+ bc->bc_async_wq = wq;
+ INIT_LIST_HEAD(&bc->async_cmd_list);
+ INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+ spin_lock_init(&bc->queue_lock);
+ bc->adapter = adapter;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->adapter = adapter;
+ vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
+ mutex_init(&vf->send_cmd_lock);
+ spin_lock_init(&vf->vlan_list_lock);
+ INIT_LIST_HEAD(&vf->rcv_act.wait_list);
+ INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
+ spin_lock_init(&vf->rcv_act.lock);
+ spin_lock_init(&vf->rcv_pend.lock);
+ init_completion(&vf->ch_free_cmpl);
+
+ INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
+
+ if (qlcnic_sriov_pf_check(adapter)) {
+ vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
+ if (!vp) {
+ err = -ENOMEM;
+ goto qlcnic_destroy_async_wq;
+ }
+ sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ vp->max_tx_bw = MAX_BW;
+ vp->min_tx_bw = MIN_BW;
+ vp->spoofchk = false;
+ eth_random_addr(vp->mac);
+ dev_info(&adapter->pdev->dev,
+ "MAC Address %pM is configured for VF %d\n",
+ vp->mac, i);
+ }
+ }
+
+ return 0;
+
+qlcnic_destroy_async_wq:
+ while (i--)
+ kfree(sriov->vf_info[i].vp);
+ destroy_workqueue(bc->bc_async_wq);
+
+qlcnic_destroy_trans_wq:
+ destroy_workqueue(bc->bc_trans_wq);
+
+qlcnic_free_vf_info:
+ kfree(sriov->vf_info);
+
+qlcnic_free_sriov:
+ kfree(adapter->ahw->sriov);
+ return err;
+}
+
+void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_cmd_args cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&t_list->lock, flags);
+
+ while (!list_empty(&t_list->wait_list)) {
+ trans = list_first_entry(&t_list->wait_list,
+ struct qlcnic_bc_trans, list);
+ list_del(&trans->list);
+ t_list->count--;
+ cmd.req.arg = (u32 *)trans->req_pay;
+ cmd.rsp.arg = (u32 *)trans->rsp_pay;
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+
+ spin_unlock_irqrestore(&t_list->lock, flags);
+}
+
+void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_cleanup_async_list(bc);
+ destroy_workqueue(bc->bc_async_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+ }
+
+ destroy_workqueue(bc->bc_trans_wq);
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ kfree(sriov->vf_info[i].vp);
+
+ kfree(sriov->vf_info);
+ kfree(adapter->ahw->sriov);
+}
+
+static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
+{
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+}
+
+void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
+{
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return;
+
+ qlcnic_sriov_free_vlans(adapter);
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_cleanup(adapter);
+}
+
+static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
+ u32 *pay, u8 pci_func, u8 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct qlcnic_cmd_args cmd;
+ unsigned long timeout;
+ int err;
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd.hdr = hdr;
+ cmd.pay = pay;
+ cmd.pay_size = size;
+ cmd.func_num = pci_func;
+ cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+ cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
+
+ err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
+ }
+
+ if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
+ }
+
+ return cmd.rsp_opcode;
+}
+
+static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
+{
+ adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
+ adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+ adapter->max_rds_rings = MAX_RDS_RINGS;
+}
+
+int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info, u16 vport_id)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_cmd_args cmd;
+ int err;
+ u32 status;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = vport_id << 16 | 0x1;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get vport info, err=%d\n", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+
+ status = cmd.rsp.arg[2] & 0xffff;
+ if (status & BIT_0)
+ npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
+ if (status & BIT_1)
+ npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
+ if (status & BIT_2)
+ npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
+ if (status & BIT_3)
+ npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
+ if (status & BIT_4)
+ npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
+ if (status & BIT_5)
+ npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
+ if (status & BIT_6)
+ npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
+ if (status & BIT_7)
+ npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
+ if (status & BIT_8)
+ npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
+ if (status & BIT_9)
+ npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
+
+ npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
+ npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
+
+ dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
+ npar_info->min_tx_bw, npar_info->max_tx_bw,
+ npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
+ adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
+ return 0;
+}
+
+static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vlans, ret;
+ u16 *vlans;
+
+ if (sriov->allowed_vlans)
+ return 0;
+
+ sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
+ sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
+ dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
+ sriov->num_allowed_vlans);
+
+ ret = qlcnic_sriov_alloc_vlans(adapter);
+ if (ret)
+ return ret;
+
+ if (!sriov->any_vlan)
+ return 0;
+
+ num_vlans = sriov->num_allowed_vlans;
+ sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
+ if (!sriov->allowed_vlans)
+ return -ENOMEM;
+
+ vlans = (u16 *)&cmd->rsp.arg[3];
+ for (i = 0; i < num_vlans; i++)
+ sriov->allowed_vlans[i] = vlans[i];
+
+ return 0;
+}
+
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_cmd_args cmd;
+ int ret = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
+ ret);
+ } else {
+ sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
+ switch (sriov->vlan_mode) {
+ case QLC_GUEST_VLAN_MODE:
+ ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
+ break;
+ case QLC_PVID_MODE:
+ ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
+ break;
+ }
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info;
+ int err;
+
+ err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
+ if (err)
+ return err;
+
+ ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
+ if (err)
+ return -EIO;
+
+ if (qlcnic_83xx_get_port_info(adapter))
+ return -EIO;
+
+ qlcnic_sriov_vf_cfg_buff_desc(adapter);
+ adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
+ dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
+ adapter->ahw->fw_hal_version);
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->op_mode = nic_info.op_mode;
+ ahw->capabilities = nic_info.capabilities;
+ return 0;
+}
+
+static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
+ INIT_LIST_HEAD(&adapter->vf_mc_list);
+	if (!qlcnic_use_msi_x && qlcnic_use_msi)
+ dev_warn(&adapter->pdev->dev,
+ "Device does not support MSI interrupts\n");
+
+ /* compute and set default and max tx/sds rings */
+ qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
+ qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
+
+ err = qlcnic_setup_intr(adapter);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto err_out_disable_msi;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto err_out_disable_msi;
+
+ err = qlcnic_sriov_init(adapter, 1);
+ if (err)
+ goto err_out_disable_mbx_intr;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto err_out_cleanup_sriov;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_disable_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_sriov_get_vf_acl(adapter);
+ if (err)
+ goto err_out_send_channel_term;
+
+ err = qlcnic_setup_netdev(adapter, adapter->netdev);
+ if (err)
+ goto err_out_send_channel_term;
+
+ pci_set_drvdata(adapter->pdev, adapter);
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ adapter->ahw->idc.delay);
+ return 0;
+
+err_out_send_channel_term:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_disable_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+
+err_out_cleanup_sriov:
+ __qlcnic_sriov_cleanup(adapter);
+
+err_out_disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+ return err;
+}
+
+static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ do {
+ msleep(20);
+ if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
+ return -EIO;
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ } while (state != QLC_83XX_IDC_DEV_READY);
+
+ return 0;
+}
+
+int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
+ ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
+ ahw->reset_context = 0;
+ adapter->fw_fail_cnt = 0;
+ ahw->msix_supported = 1;
+ adapter->need_fw_reset = 0;
+ adapter->flags |= QLCNIC_TX_INTR_SHARED;
+
+ err = qlcnic_sriov_check_dev_ready(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_setup_vf(adapter);
+ if (err)
+ return err;
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return 0;
+}
+
+void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
+ dev_info(&adapter->pdev->dev,
+ "HAL Version: %d Non Privileged SRIOV function\n",
+ ahw->fw_hal_version);
+ adapter->nic_ops = &qlcnic_sriov_vf_ops;
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
+{
+ ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
+ ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
+ ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
+}
+
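+/* Return the number of payload bytes carried by fragment @curr_frag of a
+ * back channel message of @real_pay_size bytes: every fragment except the
+ * last carries a full QLC_BC_PAYLOAD_SZ, the last carries the remainder.
+ */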
+static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
+{
+ u32 pay_size;
+
+ pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
+
+ if (pay_size)
+ pay_size = QLC_BC_PAYLOAD_SZ;
+ else
+ pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
+
+ return pay_size;
+}
+
+int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
+ u8 i;
+
+ if (qlcnic_sriov_vf_check(adapter))
+ return 0;
+
+ for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
+ if (vf_info[i].pci_func == pci_func)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
+{
+ *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
+ if (!*trans)
+ return -ENOMEM;
+
+ init_completion(&(*trans)->resp_cmpl);
+ return 0;
+}
+
+static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
+ u32 size)
+{
+ *hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
+ if (!*hdr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
+{
+ const struct qlcnic_mailbox_metadata *mbx_tbl;
+ int i, size;
+
+ mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
+ size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
+
+ for (i = 0; i < size; i++) {
+ if (type == mbx_tbl[i].cmd) {
+ mbx->op_type = QLC_BC_CMD;
+ mbx->req.num = mbx_tbl[i].in_args;
+ mbx->rsp.num = mbx_tbl[i].out_args;
+ mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->req.arg)
+ return -ENOMEM;
+ mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
+ GFP_ATOMIC);
+ if (!mbx->rsp.arg) {
+ kfree(mbx->req.arg);
+ mbx->req.arg = NULL;
+ return -ENOMEM;
+ }
+ mbx->req.arg[0] = (type | (mbx->req.num << 16) |
+ (3 << 29));
+ mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
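+/* Populate the per-fragment back channel headers of a transaction. For
+ * QLC_BC_COMMAND the request/response header arrays are allocated here and
+ * the payload pointers are taken from @cmd; for QLC_BC_RESPONSE the headers
+ * allocated when the request was received are reused and @cmd is pointed at
+ * the transaction's payload buffers.
+ */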
+static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd,
+ u16 seq, u8 msg_type)
+{
+ struct qlcnic_bc_hdr *hdr;
+ int i;
+ u32 num_regs, bc_pay_sz;
+ u16 remainder;
+ u8 cmd_op, num_frags, t_num_frags;
+
+ bc_pay_sz = QLC_BC_PAYLOAD_SZ;
+ if (msg_type == QLC_BC_COMMAND) {
+ trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
+ trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
+ num_regs = cmd->req.num;
+ trans->req_pay_size = (num_regs * 4);
+ num_regs = cmd->rsp.num;
+ trans->rsp_pay_size = (num_regs * 4);
+ cmd_op = cmd->req.arg[0] & 0xff;
+ remainder = (trans->req_pay_size) % (bc_pay_sz);
+ num_frags = (trans->req_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ t_num_frags = num_frags;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
+ return -ENOMEM;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
+ return -ENOMEM;
+ num_frags = t_num_frags;
+ hdr = trans->req_hdr;
+ } else {
+ cmd->req.arg = (u32 *)trans->req_pay;
+ cmd->rsp.arg = (u32 *)trans->rsp_pay;
+ cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
+ remainder = (trans->rsp_pay_size) % (bc_pay_sz);
+ num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
+ if (remainder)
+ num_frags++;
+ cmd->req.num = trans->req_pay_size / 4;
+ cmd->rsp.num = trans->rsp_pay_size / 4;
+ hdr = trans->rsp_hdr;
+ cmd->op_type = trans->req_hdr->op_type;
+ }
+
+ trans->trans_id = seq;
+ trans->cmd_id = cmd_op;
+ for (i = 0; i < num_frags; i++) {
+ hdr[i].version = 2;
+ hdr[i].msg_type = msg_type;
+ hdr[i].op_type = cmd->op_type;
+ hdr[i].num_cmds = 1;
+ hdr[i].num_frags = num_frags;
+ hdr[i].frag_num = i + 1;
+ hdr[i].cmd_op = cmd_op;
+ hdr[i].seq_id = seq;
+ }
+ return 0;
+}
+
+static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
+{
+ if (!trans)
+ return;
+ kfree(trans->req_hdr);
+ kfree(trans->rsp_hdr);
+ kfree(trans);
+}
+
+static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_trans_list *t_list;
+ unsigned long flags;
+ int ret = 0;
+
+ if (type == QLC_BC_RESPONSE) {
+ t_list = &vf->rcv_act;
+ spin_lock_irqsave(&t_list->lock, flags);
+ t_list->count--;
+ list_del(&trans->list);
+ if (t_list->count > 0)
+ ret = 1;
+ spin_unlock_irqrestore(&t_list->lock, flags);
+ }
+ if (type == QLC_BC_COMMAND) {
+ while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ msleep(100);
+ vf->send_cmd = NULL;
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+ }
+ return ret;
+}
+
+static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ return;
+
+ queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
+}
+
+static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
+{
+ struct completion *cmpl = &trans->resp_cmpl;
+
+ if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
+ trans->trans_state = QLC_END;
+ else
+ trans->trans_state = QLC_ABORT;
+}
+
+static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ if (type == QLC_BC_RESPONSE) {
+ trans->curr_rsp_frag++;
+ if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_END;
+ } else {
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag < trans->req_hdr->num_frags)
+ trans->trans_state = QLC_INIT;
+ else
+ trans->trans_state = QLC_WAIT_FOR_RESP;
+ }
+}
+
+static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
+ u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct completion *cmpl = &vf->ch_free_cmpl;
+
+ if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
+ trans->trans_state = QLC_ABORT;
+ return;
+ }
+
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ qlcnic_sriov_handle_multi_frags(trans, type);
+}
+
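+/* Copy one back channel fragment out of the firmware mailbox registers:
+ * the fragment header followed by @size bytes of payload, starting at
+ * mailbox word 2.
+ */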
+static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
+ u32 *hdr, u32 *pay, u32 size)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+	u8 i, max, hdr_size, j;
+
+ hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
+ max = (size / sizeof(u32)) + hdr_size;
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
+ for (; j < max; i++, j++)
+ *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
+}
+
+static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
+{
+ int ret = -EBUSY;
+ u32 timeout = 10000;
+
+ do {
+ if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
+ ret = 0;
+ break;
+ }
+ mdelay(1);
+ } while (--timeout);
+
+ return ret;
+}
+
+static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u32 pay_size;
+ u32 *hdr, *pay;
+ int ret;
+ u8 pci_func = trans->func_id;
+
+ if (__qlcnic_sriov_issue_bc_post(vf))
+ return -EBUSY;
+
+ if (type == QLC_BC_COMMAND) {
+ hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
+ pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ pay_size = (pay_size / sizeof(u32));
+ } else {
+ hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
+ pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ pay_size = (pay_size / sizeof(u32));
+ }
+
+ ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
+ pci_func, pay_size);
+ return ret;
+}
+
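+/* Drive the back channel transaction state machine until all fragments of
+ * @type (command or response) are posted: QLC_INIT posts the current
+ * fragment, QLC_WAIT_FOR_CHANNEL_FREE waits for the channel-free event
+ * before moving to the next fragment, QLC_WAIT_FOR_RESP waits for the
+ * complete response, and QLC_END/QLC_ABORT terminate the loop.
+ */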
+static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf, u8 type)
+{
+ bool flag = true;
+ int err = -EIO;
+
+ while (flag) {
+ if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
+ vf->adapter->need_fw_reset)
+ trans->trans_state = QLC_ABORT;
+
+ switch (trans->trans_state) {
+ case QLC_INIT:
+ trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
+ if (qlcnic_sriov_issue_bc_post(trans, type))
+ trans->trans_state = QLC_ABORT;
+ break;
+ case QLC_WAIT_FOR_CHANNEL_FREE:
+ qlcnic_sriov_wait_for_channel_free(trans, type);
+ break;
+ case QLC_WAIT_FOR_RESP:
+ qlcnic_sriov_wait_for_resp(trans);
+ break;
+ case QLC_END:
+ err = 0;
+ flag = false;
+ break;
+ case QLC_ABORT:
+ err = -EIO;
+ flag = false;
+ clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
+ break;
+ default:
+ err = -EIO;
+ flag = false;
+ }
+ }
+ return err;
+}
+
+static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans, int pci_func)
+{
+ struct qlcnic_vf_info *vf;
+ int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return -EIO;
+
+ vf = &adapter->ahw->sriov->vf_info[index];
+ trans->vf = vf;
+ trans->func_id = pci_func;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ if (qlcnic_sriov_pf_check(adapter))
+ return -EIO;
+ if (qlcnic_sriov_vf_check(adapter) &&
+ trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return -EIO;
+ }
+
+ mutex_lock(&vf->send_cmd_lock);
+ vf->send_cmd = trans;
+ err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
+ qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
+ mutex_unlock(&vf->send_cmd_lock);
+ return err;
+}
+
+static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+#ifdef CONFIG_QLCNIC_SRIOV
+ if (qlcnic_sriov_pf_check(adapter)) {
+ qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
+ return;
+ }
+#endif
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
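+/* Work handler servicing the transaction at the head of the VF's active
+ * receive list: prepare the response headers, process the command, send the
+ * response back and reschedule this work while more transactions are queued.
+ */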
+static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
+ trans_work);
+ struct qlcnic_bc_trans *trans = NULL;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u8 req;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (test_bit(QLC_BC_VF_FLR, &vf->state))
+ return;
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ trans = list_first_entry(&vf->rcv_act.wait_list,
+ struct qlcnic_bc_trans, list);
+
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
+ QLC_BC_RESPONSE))
+ goto cleanup_trans;
+
+ __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
+ trans->trans_state = QLC_INIT;
+ __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
+
+cleanup_trans:
+ qlcnic_free_mbx_args(&cmd);
+ req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
+ qlcnic_sriov_cleanup_transaction(trans);
+ if (req)
+ qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+}
+
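+/* A response fragment arrived for the VF's outstanding command: pull it from
+ * the mailbox and complete the transaction once the last fragment is in.
+ */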
+static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ u32 pay_size;
+
+ if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
+ return;
+
+ trans = vf->send_cmd;
+
+ if (trans == NULL)
+ goto clear_send;
+
+ if (trans->trans_id != hdr->seq_id)
+ goto clear_send;
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
+ trans->curr_rsp_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
+ (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
+ pay_size);
+ if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
+ goto clear_send;
+
+ complete(&trans->resp_cmpl);
+
+clear_send:
+ clear_bit(QLC_BC_VF_SEND, &vf->state);
+}
+
+int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ t_list->count++;
+ list_add_tail(&trans->list, &t_list->wait_list);
+ if (t_list->count == 1)
+ qlcnic_sriov_schedule_bc_cmd(sriov, vf,
+ qlcnic_sriov_process_bc_cmd);
+ return 0;
+}
+
+static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+
+ spin_lock(&t_list->lock);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock(&t_list->lock);
+ return 0;
+}
+
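+/* A continuation fragment (frag_num > 1) arrived: locate the matching
+ * pending transaction by sequence id, pull the fragment from the mailbox
+ * and, once all fragments have been received, move the transaction to the
+ * active list for processing.
+ */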
+static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_hdr *hdr)
+{
+ struct qlcnic_bc_trans *trans = NULL;
+ struct list_head *node;
+ u32 pay_size, curr_frag;
+ u8 found = 0, active = 0;
+
+ spin_lock(&vf->rcv_pend.lock);
+ if (vf->rcv_pend.count > 0) {
+ list_for_each(node, &vf->rcv_pend.wait_list) {
+ trans = list_entry(node, struct qlcnic_bc_trans, list);
+ if (trans->trans_id == hdr->seq_id) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ if (found) {
+ curr_frag = trans->curr_req_frag;
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ curr_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + curr_frag),
+ (u32 *)(trans->req_pay + curr_frag),
+ pay_size);
+ trans->curr_req_frag++;
+ if (trans->curr_req_frag >= hdr->num_frags) {
+ vf->rcv_pend.count--;
+ list_del(&trans->list);
+ active = 1;
+ }
+ }
+ spin_unlock(&vf->rcv_pend.lock);
+
+ if (active)
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans))
+ qlcnic_sriov_cleanup_transaction(trans);
+}
+
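+/* Handle the first fragment of an incoming back channel command: allocate a
+ * transaction, pull the fragment from the mailbox and either queue the
+ * transaction for processing (single-fragment command) or park it on the
+ * pending list until the remaining fragments arrive.
+ */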
+static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
+ struct qlcnic_bc_hdr *hdr,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_trans *trans;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_cmd_args cmd;
+ u32 pay_size;
+ int err;
+ u8 cmd_op;
+
+ if (adapter->need_fw_reset)
+ return;
+
+ if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
+ hdr->op_type != QLC_BC_CMD &&
+ hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
+ return;
+
+ if (hdr->frag_num > 1) {
+ qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
+ return;
+ }
+
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd_op = hdr->cmd_op;
+ if (qlcnic_sriov_alloc_bc_trans(&trans))
+ return;
+
+ if (hdr->op_type == QLC_BC_CMD)
+ err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
+ else
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
+
+ if (err) {
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ cmd.op_type = hdr->op_type;
+ if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
+ QLC_BC_COMMAND)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ return;
+ }
+
+ pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
+ trans->curr_req_frag);
+ qlcnic_sriov_pull_bc_msg(vf->adapter,
+ (u32 *)(trans->req_hdr + trans->curr_req_frag),
+ (u32 *)(trans->req_pay + trans->curr_req_frag),
+ pay_size);
+ trans->func_id = vf->pci_func;
+ trans->vf = vf;
+ trans->trans_id = hdr->seq_id;
+ trans->curr_req_frag++;
+
+ if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
+ return;
+
+ if (trans->curr_req_frag == trans->req_hdr->num_frags) {
+ if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
+ qlcnic_free_mbx_args(&cmd);
+ qlcnic_sriov_cleanup_transaction(trans);
+ }
+ } else {
+ spin_lock(&vf->rcv_pend.lock);
+ list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
+ vf->rcv_pend.count++;
+ spin_unlock(&vf->rcv_pend.lock);
+ }
+}
+
+static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr hdr;
+ u32 *ptr = (u32 *)&hdr;
+ u8 msg_type, i;
+
+ for (i = 2; i < 6; i++)
+ ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
+ msg_type = hdr.msg_type;
+
+ switch (msg_type) {
+ case QLC_BC_COMMAND:
+ qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
+ break;
+ case QLC_BC_RESPONSE:
+ qlcnic_sriov_handle_bc_resp(&hdr, vf);
+ break;
+ }
+}
+
+static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ qlcnic_sriov_pf_handle_flr(sriov, vf);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Invalid event to VF. VF should not get FLR event\n");
+}
+
+void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
+{
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_sriov *sriov;
+ int index;
+ u8 pci_func;
+
+ sriov = adapter->ahw->sriov;
+ pci_func = qlcnic_sriov_target_func_id(event);
+ index = qlcnic_sriov_func_to_index(adapter, pci_func);
+
+ if (index < 0)
+ return;
+
+ vf = &sriov->vf_info[index];
+ vf->pci_func = pci_func;
+
+ if (qlcnic_sriov_channel_free_check(event))
+ complete(&vf->ch_free_cmpl);
+
+ if (qlcnic_sriov_flr_check(event)) {
+ qlcnic_sriov_handle_flr_event(sriov, vf);
+ return;
+ }
+
+ if (qlcnic_sriov_bc_msg_check(event))
+ qlcnic_sriov_handle_msg_event(sriov, vf);
+}
+
+int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
+ return 0;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
+ return -ENOMEM;
+
+ if (enable)
+ cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
+
+ err = qlcnic_83xx_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to %s bc events, err=%d\n",
+ (enable ? "enable" : "disable"), err);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
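+/* After an adapter reset the PF may be slow to respond; if the device is
+ * back in the READY state, rearm the transaction for another attempt,
+ * bounded by QLC_BC_CMD_MAX_RETRY_CNT.
+ */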
+static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans)
+{
+ u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
+ u32 state;
+
+ state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+ if (state == QLC_83XX_IDC_DEV_READY) {
+ msleep(20);
+ clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
+ trans->trans_state = QLC_INIT;
+ if (++adapter->fw_fail_cnt > max)
+ return -EIO;
+ else
+ return 0;
+ }
+
+ return -EIO;
+}
+
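+/* Issue a mailbox command from the VF by tunnelling it to the PF over the
+ * back channel; CHANNEL_INIT commands that time out after an adapter reset
+ * are retried until the PF responds or the retry limit is reached.
+ */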
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlcnic_bc_trans *trans;
+ int err;
+ u32 rsp_data, opcode, mbx_err_code, rsp;
+ u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
+ u8 func = ahw->pci_func;
+
+ rsp = qlcnic_sriov_alloc_bc_trans(&trans);
+ if (rsp)
+ goto free_cmd;
+
+ rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
+ if (rsp)
+ goto cleanup_transaction;
+
+retry:
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ rsp = -EIO;
+ QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
+ QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
+ goto err_out;
+ }
+
+ err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
+ if (err) {
+ dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
+ (cmd->req.arg[0] & 0xffff), func);
+ rsp = QLCNIC_RCODE_TIMEOUT;
+
+ /* After adapter reset PF driver may take some time to
+ * respond to VF's request. Retry request till maximum retries.
+ */
+ if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ !qlcnic_sriov_retry_bc_cmd(adapter, trans))
+ goto retry;
+
+ goto err_out;
+ }
+
+ rsp_data = cmd->rsp.arg[0];
+ mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
+ opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
+
+ if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
+ (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
+ }
+
+err_out:
+ if (rsp == QLCNIC_RCODE_TIMEOUT) {
+ ahw->reset_context = 1;
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ }
+
+cleanup_transaction:
+ qlcnic_sriov_cleanup_transaction(trans);
+
+free_cmd:
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ }
+
+ return rsp;
+}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
+static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
+ return -ENOMEM;
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
+ ret);
+ goto out;
+ }
+
+	cmd_op = (cmd.rsp.arg[0] & 0xff);
+	if (cmd.rsp.arg[0] >> 25 == 2) {
+		ret = 2;
+		goto out;
+	}
+ if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
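+/* Program @mac for the VF: when guest VLANs are configured the filter is
+ * added once per active VLAN (plus the untagged entry on 84xx adapters),
+ * otherwise it is added untagged.
+ */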
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
+ enum qlcnic_mac_type mac_type)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 vlan_id;
+ int i;
+
+	vf = &sriov->vf_info[0];
+
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id,
+ mac_type);
+ }
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
+ }
+}
+
+void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
+{
+ struct list_head *head = &bc->async_cmd_list;
+ struct qlcnic_async_cmd *entry;
+
+ flush_workqueue(bc->bc_async_wq);
+ cancel_work_sync(&bc->vf_async_work);
+
+ spin_lock(&bc->queue_lock);
+ while (!list_empty(head)) {
+ entry = list_entry(head->next, struct qlcnic_async_cmd,
+ list);
+ list_del(&entry->list);
+ kfree(entry->cmd);
+ kfree(entry);
+ }
+ spin_unlock(&bc->queue_lock);
+}
+
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
+ u32 mode = VPORT_MISS_MODE_DROP;
+
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return;
+
+ if (netdev->flags & IFF_PROMISC) {
+ if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
+ if (!netdev_mc_empty(netdev)) {
+ qlcnic_flush_mcast_mac(adapter);
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr,
+ QLCNIC_MULTICAST_MAC);
+ }
+ }
+
+	/* Configure unicast MAC addresses; if there is not enough space to
+	 * store all of them, enable promiscuous mode.
+	 */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr,
+ QLCNIC_UNICAST_MAC);
+ }
+
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = true;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = false;
+ adapter->rx_mac_learn = false;
+ }
+ }
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
+
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
+{
+ struct qlcnic_async_cmd *entry, *tmp;
+ struct qlcnic_back_channel *bc;
+ struct qlcnic_cmd_args *cmd;
+ struct list_head *head;
+ LIST_HEAD(del_list);
+
+ bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+ head = &bc->async_cmd_list;
+
+ spin_lock(&bc->queue_lock);
+ list_splice_init(head, &del_list);
+ spin_unlock(&bc->queue_lock);
+
+ list_for_each_entry_safe(entry, tmp, &del_list, list) {
+ list_del(&entry->list);
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+ kfree(entry);
+ }
+
+ if (!list_empty(head))
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
+}
+
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_async_cmd *entry = NULL;
+
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
+
+ entry->cmd = cmd;
+
+ spin_lock(&bc->queue_lock);
+ list_add_tail(&entry->list, &bc->async_cmd_list);
+ spin_unlock(&bc->queue_lock);
+
+ return entry;
+}
+
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_async_cmd *entry = NULL;
+
+ entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+ if (!entry) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
+}
+
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+
+ if (adapter->need_fw_reset)
+ return -EIO;
+
+ qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ adapter->need_fw_reset = 0;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (err)
+ goto err_out_cleanup_bc_intr;
+
+ err = qlcnic_sriov_vf_init_driver(adapter);
+ if (err)
+ goto err_out_term_channel;
+
+ return 0;
+
+err_out_term_channel:
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+
+err_out_cleanup_bc_intr:
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ return err;
+}
+
+static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (netif_running(netdev)) {
+ if (!qlcnic_up(adapter, netdev))
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+}
+
+static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
+ struct net_device *netdev = adapter->netdev;
+ u8 i, max_ints = ahw->num_msix - 1;
+
+ netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ for (i = 0; i < max_ints; i++) {
+ intr_tbl[i].id = i;
+ intr_tbl[i].enabled = 0;
+ intr_tbl[i].src = 0;
+ }
+ ahw->reset_context = 0;
+}
+
+static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
+ (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev,
+ "%s: Reinitialization of VF 0x%x done after FW reset\n",
+ __func__, func);
+ } else {
+ dev_err(dev,
+ "%s: Reinitialization of VF 0x%x failed after FW reset\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "Current state 0x%x after FW reset\n",
+ state);
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct device *dev = &adapter->pdev->dev;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u8 func = ahw->pci_func;
+ u32 state;
+
+ adapter->reset_ctx_cnt++;
+
+ /* Skip the context reset and check if FW is hung */
+ if (adapter->reset_ctx_cnt < 3) {
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ dev_info(dev,
+ "Resetting context, wait here to check if FW is in failed state\n");
+ return 0;
+ }
+
+ /* Check if number of resets exceed the threshold.
+ * If it exceeds the threshold just fail the VF.
+ */
+ if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ adapter->tx_timeo_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ qlcnic_sriov_vf_detach(adapter);
+ dev_err(dev,
+ "Device context resets have exceeded the threshold, device interface will be shutdown\n");
+ return -EIO;
+ }
+
+ dev_info(dev, "Resetting context of VF 0x%x\n", func);
+ dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
+ __func__, adapter->reset_ctx_cnt, func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->need_fw_reset = 1;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ adapter->need_fw_reset = 0;
+
+ if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
+ qlcnic_sriov_vf_attach(adapter);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ adapter->fw_fail_cnt = 0;
+ dev_info(dev, "Done resetting context for VF 0x%x\n", func);
+ } else {
+ dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
+ __func__, func);
+ state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
+ dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int ret = 0;
+
+ if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
+ ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
+ else if (ahw->reset_context)
+ ret = qlcnic_sriov_vf_handle_context_reset(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_err(&adapter->pdev->dev, "Device is in failed state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
+ qlcnic_sriov_vf_detach(adapter);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+}
+
+static int
+qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+
+ dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ u8 func = adapter->ahw->pci_func;
+
+ if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
+ dev_err(&adapter->pdev->dev,
+ "Firmware hang detected by VF 0x%x\n", func);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+ adapter->tx_timeo_cnt = 0;
+ adapter->reset_ctx_cnt = 0;
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_sriov_vf_detach(adapter);
+ }
+ return 0;
+}
+
+static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
+{
+ dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
+ return 0;
+}
+
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
+static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlc_83xx_idc *idc;
+ int ret = 0;
+
+ adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
+ idc = &adapter->ahw->idc;
+ idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
+
+ switch (idc->curr_state) {
+ case QLC_83XX_IDC_DEV_READY:
+ ret = qlcnic_sriov_vf_idc_ready_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_RESET:
+ case QLC_83XX_IDC_DEV_INIT:
+ ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_NEED_QUISCENT:
+ ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_FAILED:
+ ret = qlcnic_sriov_vf_idc_failed_state(adapter);
+ break;
+ case QLC_83XX_IDC_DEV_QUISCENT:
+ break;
+ default:
+ ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
+ }
+
+ idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
+ if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+}
+
+static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(20);
+
+ clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i, err = -EINVAL;
+
+ if (!vf->sriov_vlans)
+ return err;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ err = 0;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ int err = 0;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ if (vf->num_vlan >= sriov->num_allowed_vlans)
+ err = -EINVAL;
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
+
+static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ bool vlan_exist;
+ u8 allowed = 0;
+ int i;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+ vlan_exist = qlcnic_sriov_check_any_vlan(vf);
+ if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
+ return -EINVAL;
+
+ if (enable) {
+ if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
+ return -EINVAL;
+
+ if (qlcnic_sriov_validate_num_vlans(sriov, vf))
+ return -EINVAL;
+
+ if (sriov->any_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (sriov->allowed_vlans[i] == vid)
+ allowed = 1;
+ }
+
+ if (!allowed)
+ return -EINVAL;
+ }
+ } else {
+ if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
+ enum qlcnic_vlan_operations opcode)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_sriov *sriov;
+
+ sriov = adapter->ahw->sriov;
+
+ if (!vf->sriov_vlans)
+ return;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ switch (opcode) {
+ case QLC_VLAN_ADD:
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
+ break;
+ case QLC_VLAN_DELETE:
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid VLAN operation\n");
+ }
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+}
+
+int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
+ u16 vid, u8 enable)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_vf_info *vf;
+ struct qlcnic_cmd_args cmd;
+ int ret;
+
+ memset(&cmd, 0, sizeof(cmd));
+ if (vid == 0)
+ return 0;
+
+ vf = &adapter->ahw->sriov->vf_info[0];
+ ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
+ QLCNIC_BC_CMD_CFG_GUEST_VLAN);
+ if (ret)
+ return ret;
+
+ cmd.req.arg[1] = (enable & 1) | vid << 16;
+
+ qlcnic_sriov_cleanup_async_list(&sriov->bc);
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure guest VLAN, err=%d\n", ret);
+ } else {
+ netif_addr_lock_bh(netdev);
+ qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
+
+ if (enable)
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
+ else
+ qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
+
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct list_head *head = &adapter->mac_list;
+ struct qlcnic_mac_vlan_list *cur;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
+ qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
+ QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ qlcnic_cancel_idc_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+
+ return pci_save_state(pdev);
+}
+
+static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
+{
+ struct qlc_83xx_idc *idc = &adapter->ahw->idc;
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ return err;
+
+ err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
+ if (!err) {
+ if (netif_running(netdev)) {
+ err = qlcnic_up(adapter, netdev);
+ if (!err)
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
+ idc->delay);
+ return err;
+}
+
+int qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
+ sizeof(*vf->sriov_vlans), GFP_KERNEL);
+ if (!vf->sriov_vlans)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ kfree(vf->sriov_vlans);
+ vf->sriov_vlans = NULL;
+ }
+}
+
+void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (!vf->sriov_vlans[i]) {
+ vf->sriov_vlans[i] = vlan_id;
+ vf->num_vlan++;
+ return;
+ }
+ }
+}
+
+void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf, u16 vlan_id)
+{
+ int i;
+
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ if (vf->sriov_vlans[i] == vlan_id) {
+ vf->sriov_vlans[i] = 0;
+ vf->num_vlan--;
+ return;
+ }
+ }
+}
+
+bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
+{
+ bool err = false;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+
+ if (vf->num_vlan)
+ err = true;
+
+ spin_unlock_bh(&vf->vlan_list_lock);
+ return err;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 000000000..8dd7aa08e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,2046 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/types.h>
+
+#include "qlcnic_sriov.h"
+#include "qlcnic.h"
+
+#define QLCNIC_SRIOV_VF_MAX_MAC 7
+#define QLC_VF_MIN_TX_RATE 100
+#define QLC_VF_MAX_TX_RATE 9999
+#define QLC_MAC_OPCODE_MASK 0x7
+#define QLC_VF_FLOOD_BIT BIT_16
+#define QLC_FLOOD_MODE 0x5
+#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
+
+struct qlcnic_sriov_cmd_handler {
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+struct qlcnic_sriov_fw_cmd_handler {
+ u32 cmd;
+ int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
+};
+
+static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info,
+ u16 vport_id)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = (vport_id << 16) | 0x1;
+ cmd.req.arg[2] = npar_info->bit_offsets;
+ cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
+ cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
+ cmd.req.arg[4] = npar_info->max_tx_mac_filters;
+ cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
+ cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
+ (npar_info->max_rx_ip_addr << 16);
+ cmd.req.arg[6] = npar_info->max_rx_lro_flow |
+ (npar_info->max_rx_status_rings << 16);
+ cmd.req.arg[7] = npar_info->max_rx_buf_rings |
+ (npar_info->max_rx_ques << 16);
+ cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
+ cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
+ cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set vport info, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
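+/* Partition the firmware resource pool between the PF and its VFs for
+ * function @func and push the resulting per-function limits to firmware
+ * through the SET_NIC_INFO vport command.
+ */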
+static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info, u16 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_resources *res = &sriov->ff_max;
+ u16 num_macs = sriov->num_allowed_vlans + 1;
+	int ret, vpid, id;
+ struct qlcnic_vport *vp;
+ u32 num_vfs, max, temp;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0)
+ return -EINVAL;
+
+ num_vfs = sriov->num_vfs;
+ max = num_vfs + 1;
+ info->bit_offsets = 0xffff;
+ info->max_tx_ques = res->num_tx_queues / max;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
+
+ info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
+
+ if (adapter->ahw->pci_func == func) {
+ info->min_tx_bw = 0;
+ info->max_tx_bw = MAX_BW;
+
+ temp = res->num_rx_ucast_mac_filters - num_macs * num_vfs;
+ info->max_rx_ucast_mac_filters = temp;
+ temp = res->num_tx_mac_filters - num_macs * num_vfs;
+ info->max_tx_mac_filters = temp;
+ temp = num_macs * num_vfs * QLCNIC_SRIOV_VF_MAX_MAC;
+ temp = res->num_rx_mcast_mac_filters - temp;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
+ } else {
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+ vp = sriov->vf_info[id].vp;
+ info->min_tx_bw = vp->min_tx_bw;
+ info->max_tx_bw = vp->max_tx_bw;
+
+ info->max_rx_ucast_mac_filters = num_macs;
+ info->max_tx_mac_filters = num_macs;
+ temp = num_macs * QLCNIC_SRIOV_VF_MAX_MAC;
+ info->max_rx_mcast_mac_filters = temp;
+
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
+ }
+
+ info->max_rx_ip_addr = res->num_destip / max;
+ info->max_rx_status_rings = res->num_rx_status_rings / max;
+ info->max_rx_buf_rings = res->num_rx_buf_rings / max;
+ info->max_rx_ques = res->num_rx_queues / max;
+ info->max_rx_lro_flow = res->num_lro_flows_supported / max;
+ info->max_tx_vlan_keys = res->num_txvlan_keys;
+ info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
+ info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
+
+ ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
+{
+ struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
+
+ ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
+ ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
+ ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
+ ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
+ ff_max->num_rx_queues = info->max_rx_ques;
+ ff_max->num_tx_queues = info->max_tx_ques;
+ ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
+ ff_max->num_destip = info->max_rx_ip_addr;
+ ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
+ ff_max->num_rx_status_rings = info->max_rx_status_rings;
+ ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
+ ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
+}
+
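+/* Derive the number of guest VLANs allowed per VF from the multicast MAC
+ * filter pool shared by all functions; 83xx adapters are limited to a
+ * single guest VLAN.
+ */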
+static void qlcnic_sriov_set_vf_max_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int temp, total_fn;
+
+ temp = npar_info->max_rx_mcast_mac_filters;
+ total_fn = sriov->num_vfs + 1;
+
+ temp = temp / (QLCNIC_SRIOV_VF_MAX_MAC * total_fn);
+ sriov->num_allowed_vlans = temp - 1;
+
+ if (qlcnic_83xx_pf_check(adapter))
+ sriov->num_allowed_vlans = 1;
+
+ netdev_info(adapter->netdev, "Max Guest VLANs supported per VF = %d\n",
+ sriov->num_allowed_vlans);
+}
+
+static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *npar_info)
+{
+ int err;
+ struct qlcnic_cmd_args cmd;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
+ return -ENOMEM;
+
+ cmd.req.arg[1] = 0x2;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get PF info, err=%d\n", err);
+ goto out;
+ }
+
+ npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
+ npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
+ npar_info->max_vports = MSW(cmd.rsp.arg[2]);
+ npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
+ npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
+ npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
+ npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
+ npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
+ npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
+ npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
+ npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
+ npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
+
+ qlcnic_sriov_set_vf_max_vlan(adapter, npar_info);
+ qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
+ dev_info(&adapter->pdev->dev,
+ "\n\ttotal_pf: %d,\n"
+ "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
+ "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
+ "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
+ "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
+ "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
+ "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
+ npar_info->total_pf, npar_info->total_rss_engines,
+ npar_info->max_vports, npar_info->max_tx_ques,
+ npar_info->max_tx_mac_filters,
+ npar_info->max_rx_mcast_mac_filters,
+ npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
+ npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
+ npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
+ npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
+ npar_info->max_remote_ipv6_addrs);
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = 0;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = 0;
+ }
+}
+
+static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
+ u16 vport_handle, u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ sriov->vp_handle = vport_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index < 0)
+ return;
+ vp = sriov->vf_info[index].vp;
+ vp->handle = vport_handle;
+ }
+}
+
+static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
+ u8 func)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ int index;
+
+ if (adapter->ahw->pci_func == func) {
+ return sriov->vp_handle;
+ } else {
+ index = qlcnic_sriov_func_to_index(adapter, func);
+ if (index >= 0) {
+ vf_info = &sriov->vf_info[index];
+ return vf_info->vp->handle;
+ }
+ }
+
+ return -EINVAL;
+}
+
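+/* Create (@flag != 0) or destroy (@flag == 0) the eSwitch vport for
+ * function @func and record or clear the vport handle returned by firmware.
+ */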
+static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
+ u8 flag, u16 func)
+{
+ struct qlcnic_cmd_args cmd;
+ int ret;
+ int vpid;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
+ return -ENOMEM;
+
+ if (flag) {
+ cmd.req.arg[3] = func << 8;
+ } else {
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
+ if (vpid < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
+ }
+
+ ret = qlcnic_issue_cmd(adapter, &cmd);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed %s vport, err %d for func 0x%x\n",
+ (flag ? "enable" : "disable"), ret, func);
+ goto out;
+ }
+
+ if (flag) {
+ vpid = cmd.rsp.arg[2] & 0xffff;
+ qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
+ } else {
+ qlcnic_sriov_pf_reset_vport_handle(adapter, func);
+ }
+
+out:
+ qlcnic_free_mbx_args(&cmd);
+ return ret;
+}
+
+static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
+ u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x4;
+ if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ cmd.req.arg[1] |= BIT_16;
+ if (qlcnic_84xx_check(adapter))
+ cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VLAN filtering, err=%d\n", err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+/* On configuring the VF flood bit, the PF will receive traffic from all VFs */
+static int qlcnic_sriov_pf_cfg_flood(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_FLOOD_MODE | QLC_VF_FLOOD_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure VF Flood bit on PF, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
+ u8 func, u8 enable)
+{
+ struct qlcnic_cmd_args cmd;
+ int err = -EIO;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
+ return -ENOMEM;
+
+ cmd.req.arg[0] |= (3 << 29);
+ cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
+ if (enable)
+ cmd.req.arg[1] |= BIT_0;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+		dev_err(&adapter->pdev->dev,
+			"Failed to %s SR-IOV eswitch, err=%d\n",
+			(enable ? "enable" : "disable"), err);
+ err = -EIO;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static void qlcnic_sriov_pf_del_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_back_channel *bc = &sriov->bc;
+ int i;
+
+ for (i = 0; i < sriov->num_vfs; i++)
+ cancel_work_sync(&sriov->vf_info[i].flr_work);
+
+ destroy_workqueue(bc->bc_flr_wq);
+}
+
+static int qlcnic_sriov_pf_create_flr_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
+ struct workqueue_struct *wq;
+
+ wq = create_singlethread_workqueue("qlcnic-flr");
+ if (wq == NULL) {
+ dev_err(&adapter->pdev->dev, "Cannot create FLR workqueue\n");
+ return -ENOMEM;
+ }
+
+ bc->bc_flr_wq = wq;
+ return 0;
+}
+
+void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
+{
+ u8 func = adapter->ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+ qlcnic_sriov_cfg_bc_intr(adapter, 0);
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+ __qlcnic_sriov_cleanup(adapter);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+}
+
+void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
+{
+ if (!qlcnic_sriov_pf_check(adapter))
+ return;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return;
+
+ pci_disable_sriov(adapter->pdev);
+ netdev_info(adapter->netdev,
+ "SR-IOV is disabled successfully on port %d\n",
+ adapter->portnum);
+}
+
+static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (pci_vfs_assigned(adapter->pdev)) {
+ netdev_err(adapter->netdev,
+ "SR-IOV VFs belonging to port %d are assigned to VMs. SR-IOV can not be disabled on this port\n",
+ adapter->portnum);
+ netdev_info(adapter->netdev,
+ "Please detach SR-IOV VFs belonging to port %d from VMs, and then try to disable SR-IOV on this port\n",
+ adapter->portnum);
+ return -EPERM;
+ }
+
+ qlcnic_sriov_pf_disable(adapter);
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_sriov_free_vlans(adapter);
+
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ /* After disabling SR-IOV, re-initialize the driver in default mode;
+ * configure the operational mode based on the function's op_mode.
+ */
+ if (qlcnic_83xx_configure_opmode(adapter)) {
+ rtnl_unlock();
+ return -EIO;
+ }
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ return 0;
+}
+
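+/* Bring up the PF side of SR-IOV: enable VLAN filtering, set the VF flood
+ * bit on 84xx adapters, enable the eswitch port, create the PF vport,
+ * compute per-function resource limits and enable the back-channel
+ * interrupt, unwinding each step on failure.
+ */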
+static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info, pf_info, vp_info;
+ int err;
+ u8 func = ahw->pci_func;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 1);
+ if (err)
+ return err;
+
+ if (qlcnic_84xx_check(adapter)) {
+ err = qlcnic_sriov_pf_cfg_flood(adapter);
+ if (err)
+ goto disable_vlan_filtering;
+ }
+
+ err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
+ if (err)
+ goto disable_vlan_filtering;
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (err)
+ goto disable_eswitch;
+
+ err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
+ if (err)
+ goto delete_vport;
+
+ err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
+ if (err)
+ goto delete_vport;
+
+ ahw->physical_port = (u8) nic_info.phys_port;
+ ahw->switch_mode = nic_info.switch_mode;
+ ahw->max_mtu = nic_info.max_mtu;
+ ahw->capabilities = nic_info.capabilities;
+ ahw->nic_mode = QLC_83XX_SRIOV_MODE;
+ return err;
+
+delete_vport:
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+
+disable_eswitch:
+ qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
+
+disable_vlan_filtering:
+ qlcnic_sriov_pf_cfg_vlan_filtering(adapter, 0);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ err = pci_enable_sriov(adapter->pdev, num_vfs);
+ if (err)
+ qlcnic_sriov_pf_cleanup(adapter);
+
+ return err;
+}
+
+static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
+ int num_vfs)
+{
+ int err = 0;
+
+ set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_init(adapter, num_vfs);
+ if (err)
+ goto clear_op_mode;
+
+ err = qlcnic_sriov_pf_create_flr_queue(adapter);
+ if (err)
+ goto sriov_cleanup;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ goto del_flr_queue;
+
+ err = qlcnic_sriov_alloc_vlans(adapter);
+ if (err)
+ goto del_flr_queue;
+
+ return err;
+
+del_flr_queue:
+ qlcnic_sriov_pf_del_flr_queue(adapter);
+
+sriov_cleanup:
+ __qlcnic_sriov_cleanup(adapter);
+
+clear_op_mode:
+ clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
+ adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
+ return err;
+}
+
+static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(netdev,
+ "SR-IOV cannot be enabled, when legacy interrupts are enabled\n");
+ return -EIO;
+ }
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
+ if (err)
+ goto error;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
+ netdev_info(netdev,
+ "SR-IOV is enabled successfully on port %d\n",
+ adapter->portnum);
+ /* Return number of vfs enabled */
+ return num_vfs;
+ }
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
+ rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
+ return err;
+}
+
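+/* PCI sriov_configure callback, typically reached through the sriov_numvfs
+ * sysfs attribute: num_vfs == 0 disables SR-IOV, any other value enables
+ * that many VFs. Guarded by the __QLCNIC_RESETTING bit.
+ */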
+int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
+ int err;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (num_vfs == 0)
+ err = qlcnic_pci_sriov_disable(adapter);
+ else
+ err = qlcnic_pci_sriov_enable(adapter, num_vfs);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_acl(struct qlcnic_adapter *adapter, u8 func)
+{
+ struct qlcnic_cmd_args cmd;
+ struct qlcnic_vport *vp;
+ int err, id;
+ u8 *mac;
+
+ id = qlcnic_sriov_func_to_index(adapter, func);
+ if (id < 0)
+ return id;
+
+ vp = adapter->ahw->sriov->vf_info[id].vp;
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = 0x3 | func << 16;
+ if (vp->spoofchk) {
+ mac = vp->mac;
+ cmd.req.arg[2] |= BIT_1 | BIT_3 | BIT_8;
+ cmd.req.arg[4] = mac[5] | mac[4] << 8 | mac[3] << 16 |
+ mac[2] << 24;
+ cmd.req.arg[5] = mac[1] | mac[0] << 8;
+ }
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ cmd.req.arg[2] |= BIT_6;
+ cmd.req.arg[3] |= vp->pvid << 8;
+ }
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to set ACL, err=%d\n",
+ err);
+
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+}
+
+static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
+ u16 func)
+{
+ struct qlcnic_info defvp_info;
+ int err;
+
+ err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
+ if (err)
+ return -EIO;
+
+ err = qlcnic_sriov_set_vf_acl(adapter, func);
+ if (err)
+ return err;
+
+ return 0;
+}
+
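+/* Handle CHANNEL_INIT/CHANNEL_TERM back-channel requests from a VF driver:
+ * create (or destroy) the VF's vport, push the default vport settings on
+ * init, and report success (1) or failure (2) in bits 25+ of rsp.arg[0].
+ */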
+static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
+ u16 func = vf->pci_func;
+ size_t size;
+ int err;
+
+ adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
+ err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
+ if (!err) {
+ err = qlcnic_sriov_set_vf_vport_info(adapter, func);
+ if (err)
+ qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+ } else {
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE) {
+ size = sizeof(*vf->sriov_vlans);
+ size = size * sriov->num_allowed_vlans;
+ memset(vf->sriov_vlans, 0, size);
+ }
+
+ err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
+ }
+
+ if (err)
+ goto err_out;
+
+ cmd->rsp.arg[0] |= (1 << 25);
+
+ if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
+ set_bit(QLC_BC_VF_STATE, &vf->state);
+ else
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+
+ return err;
+
+err_out:
+ cmd->rsp.arg[0] |= (2 << 25);
+ return err;
+}
+
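+/* Program (or remove) the VF's administratively assigned MAC, optionally
+ * qualified by a VLAN, in the firmware filter for the VF's vport. The
+ * command is issued with QLC_83XX_MBX_CMD_NO_WAIT, so on success the
+ * buffer is not freed here (it is presumably reclaimed by the mailbox
+ * completion path).
+ */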
+static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ u16 vlan, u8 op)
+{
+ struct qlcnic_cmd_args *cmd;
+ struct qlcnic_macvlan_mbx mv;
+ struct qlcnic_vport *vp;
+ u8 *addr;
+ int err;
+ u32 *buf;
+ int vpid;
+
+ vp = vf->vp;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid < 0) {
+ err = -EINVAL;
+ goto free_args;
+ }
+
+ if (vlan)
+ op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
+
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+
+ addr = vp->mac;
+ mv.vlan = vlan;
+ mv.mac_addr0 = addr[0];
+ mv.mac_addr1 = addr[1];
+ mv.mac_addr2 = addr[2];
+ mv.mac_addr3 = addr[3];
+ mv.mac_addr4 = addr[4];
+ mv.mac_addr5 = addr[5];
+ buf = &cmd->req.arg[2];
+ memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ return err;
+
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
+ return err;
+}
+
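+/* The handlers below service mailbox commands issued by a VF driver over
+ * the back channel. Each one validates the request, rewrites ownership
+ * fields (vport handle, context IDs) so the command acts on the VF's own
+ * resources, and forwards it to firmware; failures are signalled by
+ * setting an error code in bits 25+ of rsp.arg[0].
+ */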
+static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ int opcode)
+{
+ struct qlcnic_sriov *sriov;
+ u16 vlan;
+ int i;
+
+ sriov = adapter->ahw->sriov;
+
+ spin_lock_bh(&vf->vlan_list_lock);
+ if (vf->num_vlan) {
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan = vf->sriov_vlans[i];
+ if (vlan)
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan,
+ opcode);
+ }
+ }
+ spin_unlock_bh(&vf->vlan_list_lock);
+
+ if (vf->vp->vlan_mode != QLC_PVID_MODE) {
+ if (qlcnic_83xx_pf_check(adapter) &&
+ qlcnic_sriov_check_any_vlan(vf))
+ return;
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0, opcode);
+ }
+}
+
+static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_rcv_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_rx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[6] = vf->vp->handle;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err) {
+ mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
+ vf->rx_ctx_id = mbx_out->ctx_id;
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_ADD);
+ } else {
+ vf->rx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ u8 type, *mac;
+
+ type = cmd->req.arg[1];
+ switch (type) {
+ case QLCNIC_SET_STATION_MAC:
+ case QLCNIC_SET_FAC_DEF_MAC:
+ cmd->rsp.arg[0] = (2 << 25);
+ break;
+ case QLCNIC_GET_CURRENT_MAC:
+ cmd->rsp.arg[0] = (1 << 25);
+ mac = vf->vp->mac;
+ cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
+ cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
+ ((mac[3]) << 16 & 0xff0000) |
+ ((mac[2]) << 24 & 0xff000000);
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_tx_mbx_out *mbx_out;
+ int err;
+
+ err = qlcnic_sriov_validate_create_tx_ctx(cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[5] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err) {
+ mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
+ vf->tx_ctx_id = mbx_out->ctx_id;
+ } else {
+ vf->tx_ctx_id = 0;
+ }
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ qlcnic_83xx_cfg_default_mac_vlan(adapter, vf, QLCNIC_MAC_DEL);
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->rx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[0] >> 29) != 0x3)
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ if (!err)
+ vf->tx_ctx_id = 0;
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
+ return -EINVAL;
+
+ if (!(cmd->req.arg[1] & BIT_16))
+ return -EINVAL;
+
+ if ((cmd->req.arg[1] & 0xff) != 0x1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ if (cmd->req.arg[2] > adapter->ahw->max_mtu)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] & BIT_31) {
+ if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
+ return -EINVAL;
+ } else {
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->req.arg[1] != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
+ if (err)
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ else
+ err = qlcnic_issue_cmd(adapter, cmd);
+
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
+ u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
+ ctx_id = cmd->req.arg[1] >> 16;
+ pkts = cmd->req.arg[2] & 0xffff;
+ time = cmd->req.arg[2] >> 16;
+
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
+
+ return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vport *vp = vf->vp;
+ u8 op, new_op;
+
+ if (!(cmd->req.arg[1] & BIT_8))
+ return -EINVAL;
+
+ cmd->req.arg[1] |= (vf->vp->handle << 16);
+ cmd->req.arg[1] |= BIT_31;
+
+ if (vp->vlan_mode == QLC_PVID_MODE) {
+ op = cmd->req.arg[1] & 0x7;
+ cmd->req.arg[1] &= ~0x7;
+ new_op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
+ QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
+ cmd->req.arg[3] |= vp->pvid << 16;
+ cmd->req.arg[1] |= new_op;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ err = qlcnic_sriov_validate_linkevent(vf, cmd);
+ if (err) {
+ cmd->rsp.arg[0] |= (0x6 << 25);
+ return err;
+ }
+
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ int err;
+
+ cmd->req.arg[1] |= vf->vp->handle << 16;
+ cmd->req.arg[1] |= BIT_31;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ return err;
+}
+
+static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = trans->vf;
+ struct qlcnic_vport *vp = vf->vp;
+ u8 mode = vp->vlan_mode;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_sriov *sriov;
+
+ adapter = vf->adapter;
+ sriov = adapter->ahw->sriov;
+
+ cmd->rsp.arg[0] |= 1 << 25;
+
+ /* For 84xx adapters in PVID mode, the PF driver (PFD) should report
+ * the VLAN mode to the VF driver (VFD) as QLC_NO_VLAN_MODE, which is
+ * zero in the mailbox response.
+ */
+ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+ return 0;
+
+ switch (mode) {
+ case QLC_GUEST_VLAN_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8;
+ cmd->rsp.arg[2] = sriov->num_allowed_vlans << 16;
+ break;
+ case QLC_PVID_MODE:
+ cmd->rsp.arg[1] = mode | 1 << 8 | vp->pvid << 16;
+ break;
+ }
+
+ return 0;
+}
+
+static int qlcnic_sriov_pf_del_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ u16 vlan;
+
+ if (!qlcnic_sriov_check_any_vlan(vf))
+ return -EINVAL;
+
+ vlan = cmd->req.arg[1] >> 16;
+ if (!vf->rx_ctx_id) {
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+ return 0;
+ }
+
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_DEL);
+ qlcnic_sriov_del_vlan_id(sriov, vf, vlan);
+
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf,
+ 0, QLCNIC_MAC_ADD);
+ return 0;
+}
+
+static int qlcnic_sriov_pf_add_guest_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int err = -EIO;
+ u16 vlan;
+
+ if (qlcnic_83xx_pf_check(adapter) && qlcnic_sriov_check_any_vlan(vf))
+ return err;
+
+ vlan = cmd->req.arg[1] >> 16;
+
+ if (!vf->rx_ctx_id) {
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+ return 0;
+ }
+
+ if (qlcnic_83xx_pf_check(adapter)) {
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_DEL);
+ if (err)
+ return err;
+ }
+
+ err = qlcnic_sriov_cfg_vf_def_mac(adapter, vf, vlan, QLCNIC_MAC_ADD);
+
+ if (err) {
+ if (qlcnic_83xx_pf_check(adapter))
+ qlcnic_sriov_cfg_vf_def_mac(adapter, vf, 0,
+ QLCNIC_MAC_ADD);
+ return err;
+ }
+
+ qlcnic_sriov_add_vlan_id(sriov, vf, vlan);
+ return err;
+}
+
+static int qlcnic_sriov_pf_cfg_guest_vlan_cmd(struct qlcnic_bc_trans *tran,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_vf_info *vf = tran->vf;
+ struct qlcnic_adapter *adapter = vf->adapter;
+ struct qlcnic_vport *vp = vf->vp;
+ int err = -EIO;
+ u8 op;
+
+ if (vp->vlan_mode != QLC_GUEST_VLAN_MODE) {
+ cmd->rsp.arg[0] |= 2 << 25;
+ return err;
+ }
+
+ op = cmd->req.arg[1] & 0xf;
+
+ if (op)
+ err = qlcnic_sriov_pf_add_guest_vlan(adapter, vf, cmd);
+ else
+ err = qlcnic_sriov_pf_del_guest_vlan(adapter, vf, cmd);
+
+ cmd->rsp.arg[0] |= err ? 2 << 25 : 1 << 25;
+ return err;
+}
+
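+/* Firmware commands from a VF that are forwarded to firmware without any
+ * translation or validation.
+ */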
+static const int qlcnic_pf_passthru_supp_cmds[] = {
+ QLCNIC_CMD_GET_STATISTICS,
+ QLCNIC_CMD_GET_PORT_CONFIG,
+ QLCNIC_CMD_GET_LINK_STATUS,
+ QLCNIC_CMD_INIT_NIC_FUNC,
+ QLCNIC_CMD_STOP_NIC_FUNC,
+};
+
+static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
+ [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
+ [QLCNIC_BC_CMD_GET_ACL] = {&qlcnic_sriov_pf_get_acl_cmd},
+ [QLCNIC_BC_CMD_CFG_GUEST_VLAN] = {&qlcnic_sriov_pf_cfg_guest_vlan_cmd},
+};
+
+static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
+ {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
+ {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
+ {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
+ {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
+ {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
+ {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
+ {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
+ {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
+ {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
+ {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
+ {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
+ {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
+ {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
+ {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
+ {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
+};
+
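+/* Dispatch a back-channel transaction from a VF: BC control commands are
+ * indexed directly into qlcnic_pf_bc_cmd_hdlr, firmware commands are looked
+ * up in qlcnic_pf_fw_cmd_hdlr, and whitelisted commands are passed through.
+ * Anything else is rejected with status 0x9 in rsp.arg[0].
+ */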
+void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 size, cmd_op;
+
+ cmd_op = trans->req_hdr->cmd_op;
+
+ if (trans->req_hdr->op_type == QLC_BC_CMD) {
+ size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
+ if (cmd_op < size) {
+ qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
+ return;
+ }
+ } else {
+ int i;
+ size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
+ qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
+ return;
+ }
+ }
+
+ size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
+ for (i = 0; i < size; i++) {
+ if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
+ qlcnic_issue_cmd(adapter, cmd);
+ return;
+ }
+ }
+ }
+
+ cmd->rsp.arg[0] |= (0x9 << 25);
+}
+
+void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid;
+}
+
+void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ int vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= vpid << 16;
+}
+
+void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
+void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
+ u32 *int_id)
+{
+ u16 vpid;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
+ adapter->ahw->pci_func);
+ *int_id |= (vpid << 16) | BIT_31;
+}
+
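+/* FLR cleanup helpers: destroy any Rx/Tx context the VF left behind in
+ * firmware, keyed by the VF's vport handle.
+ */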
+static void qlcnic_sriov_del_rx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->rx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] = vf->rx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->rx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static void qlcnic_sriov_del_tx_ctx(struct qlcnic_adapter *adapter,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_cmd_args cmd;
+ int vpid;
+
+ if (!vf->tx_ctx_id)
+ return;
+
+ if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
+ return;
+
+ vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
+ if (vpid >= 0) {
+ cmd.req.arg[1] |= vf->tx_ctx_id | (vpid & 0xffff) << 16;
+ if (qlcnic_issue_cmd(adapter, &cmd))
+ dev_err(&adapter->pdev->dev,
+ "Failed to delete Tx ctx in firmware for func 0x%x\n",
+ vf->pci_func);
+ else
+ vf->tx_ctx_id = 0;
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+}
+
+static int qlcnic_sriov_add_act_list_irqsave(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ struct qlcnic_bc_trans *trans)
+{
+ struct qlcnic_trans_list *t_list = &vf->rcv_act;
+ unsigned long flag;
+
+ spin_lock_irqsave(&t_list->lock, flag);
+
+ __qlcnic_sriov_add_act_list(sriov, vf, trans);
+
+ spin_unlock_irqrestore(&t_list->lock, flag);
+ return 0;
+}
+
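+/* Process a (hard or software) FLR for a VF: flush pending and active
+ * back-channel transactions, tear down leftover contexts for a soft FLR,
+ * destroy the VF's vport, and re-queue the deferred CHANNEL_INIT
+ * transaction that triggered a soft FLR.
+ */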
+static void __qlcnic_sriov_process_flr(struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_adapter *adapter = vf->adapter;
+
+ qlcnic_sriov_cleanup_list(&vf->rcv_pend);
+ cancel_work_sync(&vf->trans_work);
+ qlcnic_sriov_cleanup_list(&vf->rcv_act);
+
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_del_tx_ctx(adapter, vf);
+ qlcnic_sriov_del_rx_ctx(adapter, vf);
+ }
+
+ qlcnic_sriov_pf_config_vport(adapter, 0, vf->pci_func);
+
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ if (test_bit(QLC_BC_VF_SOFT_FLR, &vf->state)) {
+ qlcnic_sriov_add_act_list_irqsave(adapter->ahw->sriov, vf,
+ vf->flr_trans);
+ clear_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = NULL;
+ }
+}
+
+static void qlcnic_sriov_pf_process_flr(struct work_struct *work)
+{
+ struct qlcnic_vf_info *vf;
+
+ vf = container_of(work, struct qlcnic_vf_info, flr_work);
+ __qlcnic_sriov_process_flr(vf);
+}
+
+static void qlcnic_sriov_schedule_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf,
+ work_func_t func)
+{
+ if (test_bit(__QLCNIC_RESETTING, &vf->adapter->state))
+ return;
+
+ INIT_WORK(&vf->flr_work, func);
+ queue_work(sriov->bc.bc_flr_wq, &vf->flr_work);
+}
+
+static void qlcnic_sriov_handle_soft_flr(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+
+ set_bit(QLC_BC_VF_FLR, &vf->state);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ set_bit(QLC_BC_VF_SOFT_FLR, &vf->state);
+ vf->flr_trans = trans;
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(adapter->netdev, "Software FLR for PCI func %d\n",
+ vf->pci_func);
+}
+
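+/* A CHANNEL_INIT request from a VF that is already marked active means the
+ * VF driver was reloaded without a clean teardown; treat it as a software
+ * FLR and defer the request until the old state has been cleaned up.
+ */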
+bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *adapter,
+ struct qlcnic_bc_trans *trans,
+ struct qlcnic_vf_info *vf)
+{
+ struct qlcnic_bc_hdr *hdr = trans->req_hdr;
+
+ if ((hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
+ (hdr->op_type == QLC_BC_CMD) &&
+ test_bit(QLC_BC_VF_STATE, &vf->state)) {
+ qlcnic_sriov_handle_soft_flr(adapter, trans, vf);
+ return true;
+ }
+
+ return false;
+}
+
+void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
+ struct qlcnic_vf_info *vf)
+{
+ struct net_device *dev = vf->adapter->netdev;
+ struct qlcnic_vport *vp = vf->vp;
+
+ if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
+ clear_bit(QLC_BC_VF_FLR, &vf->state);
+ return;
+ }
+
+ if (test_and_set_bit(QLC_BC_VF_FLR, &vf->state)) {
+ netdev_info(dev, "FLR for PCI func %d in progress\n",
+ vf->pci_func);
+ return;
+ }
+
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ memset(vf->sriov_vlans, 0,
+ sizeof(*vf->sriov_vlans) * sriov->num_allowed_vlans);
+
+ qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
+ netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
+}
+
+void qlcnic_sriov_pf_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_sriov *sriov = ahw->sriov;
+ struct qlcnic_vf_info *vf;
+ u16 num_vfs = sriov->num_vfs;
+ int i;
+
+ for (i = 0; i < num_vfs; i++) {
+ vf = &sriov->vf_info[i];
+ vf->rx_ctx_id = 0;
+ vf->tx_ctx_id = 0;
+ cancel_work_sync(&vf->flr_work);
+ __qlcnic_sriov_process_flr(vf);
+ clear_bit(QLC_BC_VF_STATE, &vf->state);
+ }
+
+ qlcnic_sriov_pf_reset_vport_handle(adapter, ahw->pci_func);
+ QLCWRX(ahw, QLCNIC_MBX_INTR_ENBL, (ahw->num_msix - 1) << 8);
+}
+
+int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err;
+
+ if (!qlcnic_sriov_enable_check(adapter))
+ return 0;
+
+ ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
+
+ err = qlcnic_sriov_pf_init(adapter);
+ if (err)
+ return err;
+
+ dev_info(&adapter->pdev->dev, "%s: op_mode %d\n",
+ __func__, ahw->op_mode);
+ return err;
+}
+
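+/* Intended as the .ndo_set_vf_mac handler: set the administrative MAC for
+ * a VF (e.g. via "ip link set <pf> vf <n> mac <addr>"). Rejected while the
+ * VF driver is loaded or when the address collides with the PF or another
+ * VF.
+ */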
+int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ int i, num_vfs;
+ struct qlcnic_vf_info *vf_info;
+ u8 *curr_mac;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ num_vfs = sriov->num_vfs;
+
+ if (!is_valid_ether_addr(mac) || vf >= num_vfs)
+ return -EINVAL;
+
+ if (ether_addr_equal(adapter->mac_addr, mac)) {
+ netdev_err(netdev, "MAC address is already in use by the PF\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_vfs; i++) {
+ vf_info = &sriov->vf_info[i];
+ if (ether_addr_equal(vf_info->vp->mac, mac)) {
+ netdev_err(netdev,
+ "MAC address is already in use by VF %d\n",
+ i);
+ return -EINVAL;
+ }
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ curr_mac = vf_info->vp->mac;
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "MAC address change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memcpy(curr_mac, mac, netdev->addr_len);
+ netdev_info(netdev, "MAC Address %pM is configured for VF %d\n",
+ mac, vf);
+ return 0;
+}
+
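+/* Set min/max Tx rates (in Mbps) for a VF. Rates are converted to a
+ * percentage of PF bandwidth (rate / 100, with 10000 Mbps as the ceiling)
+ * and, if the VF driver is already active, pushed to firmware immediately;
+ * otherwise they are only stored in the vport.
+ */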
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_info nic_info;
+ struct qlcnic_vport *vp;
+ u16 vpid;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (!min_tx_rate)
+ min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+ if (max_tx_rate && max_tx_rate >= 10000) {
+ netdev_err(netdev,
+ "Invalid max Tx rate, allowed range is [%d - %d]",
+ min_tx_rate, QLC_VF_MAX_TX_RATE);
+ return -EINVAL;
+ }
+
+ if (!max_tx_rate)
+ max_tx_rate = 10000;
+
+ if (min_tx_rate && min_tx_rate < QLC_VF_MIN_TX_RATE) {
+ netdev_err(netdev,
+ "Invalid min Tx rate, allowed range is [%d - %d]",
+ QLC_VF_MIN_TX_RATE, max_tx_rate);
+ return -EINVAL;
+ }
+
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+
+ nic_info.max_tx_bw = max_tx_rate / 100;
+ nic_info.min_tx_bw = min_tx_rate / 100;
+ nic_info.bit_offsets = BIT_0;
+
+ if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
+ return -EIO;
+ }
+
+ vp->max_tx_bw = max_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ max_tx_rate, vp->max_tx_bw, vf);
+ vp->min_tx_bw = min_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ min_tx_rate, vp->min_tx_bw, vf);
+ return 0;
+}
+
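+/* Set the VLAN policy for a VF: 4095 selects guest-VLAN mode, 0 clears any
+ * VLAN, and any other ID becomes a PVID with the given QoS. Changes are
+ * refused while the VF driver is loaded.
+ */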
+int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs || qos > 7)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (vlan > MAX_VLAN_ID) {
+ netdev_err(netdev,
+ "Invalid VLAN ID, allowed range is [0 - %d]\n",
+ MAX_VLAN_ID);
+ return -EINVAL;
+ }
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "VLAN change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ memset(vf_info->sriov_vlans, 0,
+ sizeof(*vf_info->sriov_vlans) * sriov->num_allowed_vlans);
+
+ switch (vlan) {
+ case 4095:
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
+ break;
+ case 0:
+ vp->vlan_mode = QLC_NO_VLAN_MODE;
+ vp->qos = 0;
+ break;
+ default:
+ vp->vlan_mode = QLC_PVID_MODE;
+ qlcnic_sriov_add_vlan_id(sriov, vf_info, vlan);
+ vp->qos = qos;
+ vp->pvid = vlan;
+ }
+
+ netdev_info(netdev, "Setting VLAN %d, QoS %d, for VF %d\n",
+ vlan, qos, vf);
+ return 0;
+}
+
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
+{
+ __u32 vlan = 0;
+
+ switch (vp->vlan_mode) {
+ case QLC_PVID_MODE:
+ vlan = vp->pvid;
+ break;
+ case QLC_GUEST_VLAN_MODE:
+ vlan = MAX_VLAN_ID;
+ break;
+ case QLC_NO_VLAN_MODE:
+ vlan = 0;
+ break;
+ default:
+ netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+ vp->vlan_mode, vf);
+ }
+
+ return vlan;
+}
+
+int qlcnic_sriov_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vp = sriov->vf_info[vf].vp;
+ memcpy(&ivi->mac, vp->mac, ETH_ALEN);
+ ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
+ ivi->qos = vp->qos;
+ ivi->spoofchk = vp->spoofchk;
+ if (vp->max_tx_bw == MAX_BW)
+ ivi->max_tx_rate = 0;
+ else
+ ivi->max_tx_rate = vp->max_tx_bw * 100;
+ if (vp->min_tx_bw == MIN_BW)
+ ivi->min_tx_rate = 0;
+ else
+ ivi->min_tx_rate = vp->min_tx_bw * 100;
+
+ ivi->vf = vf;
+ return 0;
+}
+
+int qlcnic_sriov_set_vf_spoofchk(struct net_device *netdev, int vf, bool chk)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct qlcnic_vf_info *vf_info;
+ struct qlcnic_vport *vp;
+
+ if (!qlcnic_sriov_pf_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (vf >= sriov->num_vfs)
+ return -EINVAL;
+
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
+ netdev_err(netdev,
+ "Spoof check change failed for VF %d, as VF driver is loaded. Please unload VF driver and retry the operation\n",
+ vf);
+ return -EOPNOTSUPP;
+ }
+
+ vp->spoofchk = chk;
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
new file mode 100644
index 000000000..5c2edb715
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -0,0 +1,1431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/swab.h>
+#include <linux/dma-mapping.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+#include <linux/aer.h>
+#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
+
+#include "qlcnic.h"
+#include "qlcnic_hw.h"
+
+int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
+{
+ return -EOPNOTSUPP;
+}
+
+int qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static ssize_t qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ goto err_out;
+
+ if (kstrtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static ssize_t qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (kstrtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ return sprintf(buf, "%d\n", !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static int qlcnic_validate_beacon(struct qlcnic_adapter *adapter, u16 beacon,
+ u8 *state, u8 *rate)
+{
+ *rate = LSB(beacon);
+ *state = MSB(beacon);
+
+ QLCDB(adapter, DRV, "rate %x state %x\n", *rate, *state);
+
+ if (!*state) {
+ *rate = __QLCNIC_MAX_LED_RATE;
+ return 0;
+ } else if (*state > __QLCNIC_MAX_LED_STATE) {
+ return -EINVAL;
+ }
+
+ if ((!*rate) || (*rate > __QLCNIC_MAX_LED_RATE))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int qlcnic_83xx_store_beacon(struct qlcnic_adapter *adapter,
+ const char *buf, size_t len)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ unsigned long h_beacon;
+ int err;
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ if (kstrtoul(buf, 2, &h_beacon))
+ return -EINVAL;
+
+ qlcnic_get_beacon_state(adapter);
+
+ if (ahw->beacon_state == h_beacon)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+
+ if (h_beacon)
+ err = qlcnic_83xx_config_led(adapter, 1, h_beacon);
+ else
+ err = qlcnic_83xx_config_led(adapter, 0, !h_beacon);
+ if (!err)
+ ahw->beacon_state = h_beacon;
+
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+
+ rtnl_unlock();
+ return len;
+}
+
+static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
+ const char *buf, size_t len)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, drv_sds_rings = adapter->drv_sds_rings;
+ u16 beacon;
+ u8 b_state, b_rate;
+
+ if (len != sizeof(u16))
+ return -EINVAL;
+
+ memcpy(&beacon, buf, sizeof(u16));
+ err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
+ if (err)
+ return err;
+
+ qlcnic_get_beacon_state(adapter);
+
+ if (ahw->beacon_state == b_state)
+ return len;
+
+ rtnl_lock();
+ if (!ahw->beacon_state) {
+ if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state)) {
+ rtnl_unlock();
+ return -EBUSY;
+ }
+ }
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ err = -EIO;
+ goto out;
+ }
+
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ err = qlcnic_diag_alloc_res(adapter->netdev, QLCNIC_LED_TEST);
+ if (err)
+ goto out;
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
+ }
+
+ err = qlcnic_config_led(adapter, b_state, b_rate);
+ if (!err) {
+ err = len;
+ ahw->beacon_state = b_state;
+ }
+
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
+ qlcnic_diag_free_res(adapter->netdev, drv_sds_rings);
+
+out:
+ if (!ahw->beacon_state)
+ clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
+ rtnl_unlock();
+
+ return err;
+}
+
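+/* "beacon" sysfs store: blink the port LED for identification. 83xx parses
+ * a 0/1 value, while 82xx expects a two-byte {rate, state} pair; both are
+ * refused for non-privileged functions.
+ */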
+static ssize_t qlcnic_store_beacon(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int err = 0;
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
+ dev_warn(dev,
+ "LED test not supported in non privileged mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (qlcnic_82xx_check(adapter))
+ err = qlcnic_82xx_store_beacon(adapter, buf, len);
+ else if (qlcnic_83xx_check(adapter))
+ err = qlcnic_83xx_store_beacon(adapter, buf, len);
+ else
+ return -EIO;
+
+ return err;
+}
+
+static ssize_t qlcnic_show_beacon(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", adapter->ahw->beacon_state);
+}
+
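+/* The CRB and memory bin attributes below give diagnostic tools raw
+ * register and memory access; reads and writes are only allowed in diag
+ * mode and must be naturally aligned (4-byte CRB, 8-byte CAMQM/memory).
+ */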
+static int qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ size_t crb_size = 4;
+
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if (offset < QLCNIC_PCI_CRBSPACE) {
+ if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
+ QLCNIC_PCI_CAMQM_END))
+ crb_size = 8;
+ else
+ return -EINVAL;
+ }
+
+ if ((size != crb_size) || (offset & (crb_size-1)))
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+ qlcnic_read_crb(adapter, buf, offset, size);
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ qlcnic_write_crb(adapter, buf, offset, size);
+ return size;
+}
+
+static int qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
+{
+ int i;
+
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].pci_func == pci_func)
+ return i;
+ }
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
+ return -EINVAL;
+}
+
+static int validate_pm_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_pm_func_cfg *pm_cfg, int count)
+{
+ u8 src_pci_func, s_esw_id, d_esw_id;
+ u8 dest_pci_func;
+ int i, src_index, dest_index;
+
+ for (i = 0; i < count; i++) {
+ src_pci_func = pm_cfg[i].pci_func;
+ dest_pci_func = pm_cfg[i].dest_npar;
+ src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
+ if (src_index < 0)
+ return -EINVAL;
+
+ dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
+ if (dest_index < 0)
+ return -EINVAL;
+
+ s_esw_id = adapter->npars[src_index].phy_port;
+ d_esw_id = adapter->npars[dest_index].phy_port;
+
+ if (s_esw_id != d_esw_id)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u32 id, action, pci_func;
+ int count, rem, i, ret, index;
+
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ rem = size % sizeof(struct qlcnic_pm_func_cfg);
+ if (rem)
+ return -EINVAL;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+ ret = validate_pm_config(adapter, pm_cfg, count);
+
+ if (ret)
+ return ret;
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ action = !!pm_cfg[i].action;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return -EINVAL;
+
+ id = adapter->npars[index].phy_port;
+ ret = qlcnic_config_port_mirroring(adapter, id,
+ action, pci_func);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < count; i++) {
+ pci_func = pm_cfg[i].pci_func;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return -EINVAL;
+ id = adapter->npars[index].phy_port;
+ adapter->npars[index].enable_pm = !!pm_cfg[i].action;
+ adapter->npars[index].dest_npar = id;
+ }
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pm_func_cfg *pm_cfg;
+ u8 pci_func;
+ u32 count;
+ int i;
+
+ memset(buf, 0, size);
+ pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
+ pm_cfg[pci_func].dest_npar = 0;
+ pm_cfg[pci_func].pci_func = i;
+ }
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ return size;
+}
+
+static int validate_esw_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_esw_func_cfg *esw_cfg, int count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, ret;
+ u32 op_mode;
+ u8 pci_func;
+
+ if (qlcnic_82xx_check(adapter))
+ op_mode = readl(ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
+ else
+ op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ if (pci_func >= ahw->max_vnic_func)
+ return -EINVAL;
+
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ return -EINVAL;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ if (qlcnic_82xx_check(adapter)) {
+ ret = QLC_DEV_GET_DRV(op_mode, pci_func);
+ } else {
+ ret = QLC_83XX_GET_FUNC_PRIVILEGE(op_mode,
+ pci_func);
+ esw_cfg[i].offload_flags = 0;
+ }
+
+ if (ret != QLCNIC_NON_PRIV_FUNC) {
+ if (esw_cfg[i].mac_anti_spoof != 0)
+ return -EINVAL;
+ if (esw_cfg[i].mac_override != 1)
+ return -EINVAL;
+ if (esw_cfg[i].promisc_mode != 1)
+ return -EINVAL;
+ }
+ break;
+ case QLCNIC_ADD_VLAN:
+ if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
+ return -EINVAL;
+ if (!esw_cfg[i].op_type)
+ return -EINVAL;
+ break;
+ case QLCNIC_DEL_VLAN:
+ if (!esw_cfg[i].op_type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ struct qlcnic_npar_info *npar;
+ int count, rem, i, ret;
+ int index;
+ u8 op_mode = 0, pci_func;
+
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ rem = size % sizeof(struct qlcnic_esw_func_cfg);
+ if (rem)
+ return -EINVAL;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+ ret = validate_esw_config(adapter, esw_cfg, count);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < count; i++) {
+ if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
+ if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
+ return -EINVAL;
+
+ if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
+ continue;
+
+ op_mode = esw_cfg[i].op_mode;
+ qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
+ esw_cfg[i].op_mode = op_mode;
+ esw_cfg[i].pci_func = adapter->ahw->pci_func;
+
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
+ rtnl_lock();
+ qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
+ rtnl_unlock();
+ break;
+ case QLCNIC_ADD_VLAN:
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ case QLCNIC_DEL_VLAN:
+ esw_cfg[i].vlan_id = 0;
+ qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
+ break;
+ }
+ }
+
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ pci_func = esw_cfg[i].pci_func;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return -EINVAL;
+ npar = &adapter->npars[index];
+ switch (esw_cfg[i].op_mode) {
+ case QLCNIC_PORT_DEFAULTS:
+ npar->promisc_mode = esw_cfg[i].promisc_mode;
+ npar->mac_override = esw_cfg[i].mac_override;
+ npar->offload_flags = esw_cfg[i].offload_flags;
+ npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
+ npar->discard_tagged = esw_cfg[i].discard_tagged;
+ break;
+ case QLCNIC_ADD_VLAN:
+ npar->pvid = esw_cfg[i].vlan_id;
+ break;
+ case QLCNIC_DEL_VLAN:
+ npar->pvid = 0;
+ break;
+ }
+ }
+out:
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_func_cfg *esw_cfg;
+ u8 pci_func;
+ u32 count;
+ int i;
+
+ memset(buf, 0, size);
+ esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ pci_func = adapter->npars[i].pci_func;
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
+ esw_cfg[pci_func].pci_func = pci_func;
+ if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
+ return -EINVAL;
+ }
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ return size;
+}
+
+static int validate_npar_config(struct qlcnic_adapter *adapter,
+ struct qlcnic_npar_func_cfg *np_cfg,
+ int count)
+{
+ u8 pci_func, i;
+
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ return -EINVAL;
+
+ if (!IS_VALID_BW(np_cfg[i].min_bw) ||
+ !IS_VALID_BW(np_cfg[i].max_bw))
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_info nic_info;
+ struct qlcnic_npar_func_cfg *np_cfg;
+ int i, count, rem, ret, index;
+ u8 pci_func;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ rem = size % sizeof(struct qlcnic_npar_func_cfg);
+ if (rem)
+ return -EINVAL;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+ ret = validate_npar_config(adapter, np_cfg, count);
+ if (ret)
+ return ret;
+
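+ /* Program min/max TX bandwidth for each function and mirror the new
+ * values into the local NPAR table.
+ */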
+ for (i = 0; i < count; i++) {
+ pci_func = np_cfg[i].pci_func;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+ nic_info.pci_func = pci_func;
+ nic_info.min_tx_bw = np_cfg[i].min_bw;
+ nic_info.max_tx_bw = np_cfg[i].max_bw;
+ ret = qlcnic_set_nic_info(adapter, &nic_info);
+ if (ret)
+ return ret;
+ index = qlcnic_is_valid_nic_func(adapter, pci_func);
+ if (index < 0)
+ return -EINVAL;
+ adapter->npars[index].min_bw = nic_info.min_tx_bw;
+ adapter->npars[index].max_bw = nic_info.max_tx_bw;
+ }
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_npar_func_cfg *np_cfg;
+ struct qlcnic_info nic_info;
+ u8 pci_func;
+ int i, ret;
+ u32 count;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+ memset(buf, 0, size);
+ np_cfg = (struct qlcnic_npar_func_cfg *)buf;
+
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
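+ /* Query per-function NIC info and report it as NPAR config entries. */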
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
+ if (adapter->npars[i].pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%u]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+ pci_func = adapter->npars[i].pci_func;
+ if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
+ continue;
+ ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
+ if (ret)
+ return ret;
+
+ np_cfg[pci_func].pci_func = pci_func;
+ np_cfg[pci_func].op_mode = (u8)nic_info.op_mode;
+ np_cfg[pci_func].port_num = nic_info.phys_port;
+ np_cfg[pci_func].fw_capab = nic_info.capabilities;
+ np_cfg[pci_func].min_bw = nic_info.min_tx_bw;
+ np_cfg[pci_func].max_bw = nic_info.max_tx_bw;
+ np_cfg[pci_func].max_tx_queues = nic_info.max_tx_ques;
+ np_cfg[pci_func].max_rx_queues = nic_info.max_rx_ques;
+ }
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics port_stats;
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return -EINVAL;
+
+ if (offset >= adapter->ahw->max_vnic_func)
+ return -EINVAL;
+
+ memset(&port_stats, 0, size);
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &port_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &port_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &port_stats, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_esw_statistics esw_stats;
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (size != sizeof(struct qlcnic_esw_statistics))
+ return -EINVAL;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ memset(&esw_stats, 0, size);
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
+ &esw_stats.rx);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
+ &esw_stats.tx);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &esw_stats, size);
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
+ return -EINVAL;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int ret;
+
+ if (qlcnic_83xx_check(adapter))
+ return -EOPNOTSUPP;
+
+ if (offset >= adapter->ahw->max_vnic_func)
+ return -EINVAL;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_RX_COUNTER);
+ if (ret)
+ return ret;
+
+ ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
+ QLCNIC_QUERY_TX_COUNTER);
+ if (ret)
+ return ret;
+
+ return size;
+}
+
+static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ struct qlcnic_pci_func_cfg *pci_cfg;
+ struct qlcnic_pci_info *pci_info;
+ int i, ret;
+ u32 count;
+
+ pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+
+ ret = qlcnic_get_pci_info(adapter, pci_info);
+ if (ret) {
+ kfree(pci_info);
+ return ret;
+ }
+
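+ /* Translate the retrieved PCI function info into the exported
+ * qlcnic_pci_func_cfg layout.
+ */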
+ pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
+ count = size / sizeof(struct qlcnic_pci_func_cfg);
+ qlcnic_swap32_buffer((u32 *)pci_info, size / sizeof(u32));
+ for (i = 0; i < count; i++) {
+ pci_cfg[i].pci_func = pci_info[i].id;
+ pci_cfg[i].func_type = pci_info[i].type;
+ pci_cfg[i].func_state = 0;
+ pci_cfg[i].port_num = pci_info[i].default_port;
+ pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
+ pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
+ memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
+ }
+
+ kfree(pci_info);
+ return size;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ unsigned char *p_read_buf;
+ int ret, count;
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ if (!size)
+ return -EINVAL;
+
+ /* Flash is read in whole 32-bit words; round the request up */
+ count = size / sizeof(u32);
+ if (size % sizeof(u32))
+ count++;
+
+ /* Allocate whole dwords so the rounded-up read cannot overrun the buffer */
+ p_read_buf = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!p_read_buf)
+ return -ENOMEM;
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_read_buf);
+ return -EIO;
+ }
+
+ ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
+ count);
+
+ if (ret) {
+ qlcnic_83xx_unlock_flash(adapter);
+ kfree(p_read_buf);
+ return ret;
+ }
+
+ qlcnic_83xx_unlock_flash(adapter);
+ qlcnic_swap32_buffer((u32 *)p_read_buf, count);
+ memcpy(buf, p_read_buf, size);
+ kfree(p_read_buf);
+
+ return size;
+}
+
+static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ count = size / sizeof(u32);
+ qlcnic_swap32_buffer((u32 *)buf, count);
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
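+ /* Write the payload in chunks of QLC_83XX_FLASH_WRITE_MAX dwords; any
+ * trailing partial chunk is not written by this path.
+ */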
+ for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
+ ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
+ (u32 *)p_src,
+ QLC_83XX_FLASH_WRITE_MAX);
+
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+ offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
+ char *buf, loff_t offset, size_t size)
+{
+ int i, ret, count;
+ unsigned char *p_cache, *p_src;
+
+ p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
+ if (!p_cache)
+ return -ENOMEM;
+
+ qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
+ memcpy(p_cache, buf, size);
+ p_src = p_cache;
+ count = size / sizeof(u32);
+
+ if (qlcnic_83xx_lock_flash(adapter) != 0) {
+ kfree(p_cache);
+ return -EIO;
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_enable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
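+ /* Write the payload one dword at a time. */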
+ for (i = 0; i < count; i++) {
+ ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
+ if (ret) {
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+
+ p_src = p_src + sizeof(u32);
+ offset = offset + sizeof(u32);
+ }
+
+ if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
+ ret = qlcnic_83xx_disable_flash_write(adapter);
+ if (ret) {
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+ return -EIO;
+ }
+ }
+
+ kfree(p_cache);
+ qlcnic_83xx_unlock_flash(adapter);
+
+ return 0;
+}
+
+static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t offset,
+ size_t size)
+{
+ int ret;
+ static int flash_mode;
+ unsigned long data;
+ struct device *dev = kobj_to_dev(kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
+
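+ /* A bare command value selects the flash mode (a sector erase runs
+ * immediately); any other write is treated as payload for the
+ * previously selected mode.
+ */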
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
+ flash_mode = QLC_83XX_ERASE_MODE;
+ ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__, __LINE__);
+ return -EIO;
+ }
+ break;
+
+ case QLC_83XX_FLASH_BULK_WRITE_CMD:
+ flash_mode = QLC_83XX_BULK_WRITE_MODE;
+ break;
+
+ case QLC_83XX_FLASH_WRITE_CMD:
+ flash_mode = QLC_83XX_WRITE_MODE;
+ break;
+ default:
+ if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+
+ if (flash_mode == QLC_83XX_WRITE_MODE) {
+ ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
+ offset, size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "%s failed at %d\n", __func__,
+ __LINE__);
+ return -EIO;
+ }
+ }
+ }
+
+ return size;
+}
+
+static const struct device_attribute dev_attr_bridged_mode = {
+ .attr = { .name = "bridged_mode", .mode = 0644 },
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static const struct device_attribute dev_attr_diag_mode = {
+ .attr = { .name = "diag_mode", .mode = 0644 },
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
+static const struct device_attribute dev_attr_beacon = {
+ .attr = { .name = "beacon", .mode = 0644 },
+ .show = qlcnic_show_beacon,
+ .store = qlcnic_store_beacon,
+};
+
+static const struct bin_attribute bin_attr_crb = {
+ .attr = { .name = "crb", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static const struct bin_attribute bin_attr_mem = {
+ .attr = { .name = "mem", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static const struct bin_attribute bin_attr_npar_config = {
+ .attr = { .name = "npar_config", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_read_npar_config,
+ .write = qlcnic_sysfs_write_npar_config,
+};
+
+static const struct bin_attribute bin_attr_pci_config = {
+ .attr = { .name = "pci_config", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_read_pci_config,
+ .write = NULL,
+};
+
+static const struct bin_attribute bin_attr_port_stats = {
+ .attr = { .name = "port_stats", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_get_port_stats,
+ .write = qlcnic_sysfs_clear_port_stats,
+};
+
+static const struct bin_attribute bin_attr_esw_stats = {
+ .attr = { .name = "esw_stats", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_get_esw_stats,
+ .write = qlcnic_sysfs_clear_esw_stats,
+};
+
+static const struct bin_attribute bin_attr_esw_config = {
+ .attr = { .name = "esw_config", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_read_esw_config,
+ .write = qlcnic_sysfs_write_esw_config,
+};
+
+static const struct bin_attribute bin_attr_pm_config = {
+ .attr = { .name = "pm_config", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_sysfs_read_pm_config,
+ .write = qlcnic_sysfs_write_pm_config,
+};
+
+static const struct bin_attribute bin_attr_flash = {
+ .attr = { .name = "flash", .mode = 0644 },
+ .size = 0,
+ .read = qlcnic_83xx_sysfs_flash_read_handler,
+ .write = qlcnic_83xx_sysfs_flash_write_handler,
+};
+
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+ /* display millidegrees Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, 0444,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
+
+void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_bin_file(dev, &bin_attr_port_stats))
+ dev_info(dev, "failed to create port stats sysfs entry\n");
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ return;
+
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+ dev_info(dev, "failed to create pci config sysfs entry\n");
+
+ if (device_create_file(dev, &dev_attr_beacon))
+ dev_info(dev, "failed to create beacon sysfs entry\n");
+
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ if (device_create_bin_file(dev, &bin_attr_esw_config))
+ dev_info(dev, "failed to create esw config sysfs entry\n");
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ if (device_create_bin_file(dev, &bin_attr_npar_config))
+ dev_info(dev, "failed to create npar config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_pm_config))
+ dev_info(dev, "failed to create pm config sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_esw_stats))
+ dev_info(dev, "failed to create eswitch stats sysfs entry\n");
+}
+
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_bin_file(dev, &bin_attr_port_stats);
+
+ if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC)
+ return;
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+
+ if (test_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state))
+ return;
+
+ device_remove_bin_file(dev, &bin_attr_pci_config);
+ device_remove_file(dev, &dev_attr_beacon);
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
+ return;
+ device_remove_bin_file(dev, &bin_attr_esw_config);
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ return;
+ device_remove_bin_file(dev, &bin_attr_npar_config);
+ device_remove_bin_file(dev, &bin_attr_pm_config);
+ device_remove_bin_file(dev, &bin_attr_esw_stats);
+}
+
+void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_create_diag_entries(adapter);
+}
+
+void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ qlcnic_remove_diag_entries(adapter);
+}
+
+void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ qlcnic_create_diag_entries(adapter);
+
+ if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
+ dev_info(dev, "failed to create flash sysfs entry\n");
+}
+
+void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ qlcnic_remove_diag_entries(adapter);
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
+}